Diffstat (limited to 'vendor/golang.org/x/net')
-rw-r--r--  vendor/golang.org/x/net/http2/client_conn_pool.go |   47
-rw-r--r--  vendor/golang.org/x/net/http2/errors.go           |   12
-rw-r--r--  vendor/golang.org/x/net/http2/frame.go            |   62
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/huffman.go    |   38
-rw-r--r--  vendor/golang.org/x/net/http2/pipe.go             |   11
-rw-r--r--  vendor/golang.org/x/net/http2/server.go           |  101
-rw-r--r--  vendor/golang.org/x/net/http2/transport.go        | 1424
-rw-r--r--  vendor/golang.org/x/net/idna/go118.go             |   14
-rw-r--r--  vendor/golang.org/x/net/idna/idna10.0.0.go        |    6
-rw-r--r--  vendor/golang.org/x/net/idna/idna9.0.0.go         |    4
-rw-r--r--  vendor/golang.org/x/net/idna/pre_go118.go         |   12
-rw-r--r--  vendor/golang.org/x/net/idna/punycode.go          |   36
12 files changed, 1077 insertions, 690 deletions
diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go
index 652bc11a0..c936843ea 100644
--- a/vendor/golang.org/x/net/http2/client_conn_pool.go
+++ b/vendor/golang.org/x/net/http2/client_conn_pool.go
@@ -16,6 +16,12 @@ import (
// ClientConnPool manages a pool of HTTP/2 client connections.
type ClientConnPool interface {
+ // GetClientConn returns a specific HTTP/2 connection (usually
+ // a TLS-TCP connection) to an HTTP/2 server. On success, the
+ // returned ClientConn accounts for the upcoming RoundTrip
+ // call, so the caller should not omit it. If the caller needs
+ // to, ClientConn.RoundTrip can be called with a bogus
+ // new(http.Request) to release the stream reservation.
GetClientConn(req *http.Request, addr string) (*ClientConn, error)
MarkDead(*ClientConn)
}
@@ -42,7 +48,7 @@ type clientConnPool struct {
conns map[string][]*ClientConn // key is host:port
dialing map[string]*dialCall // currently in-flight dials
keys map[*ClientConn][]string
- addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls
+ addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
}
func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
@@ -54,28 +60,8 @@ const (
noDialOnMiss = false
)
-// shouldTraceGetConn reports whether getClientConn should call any
-// ClientTrace.GetConn hook associated with the http.Request.
-//
-// This complexity is needed to avoid double calls of the GetConn hook
-// during the back-and-forth between net/http and x/net/http2 (when the
-// net/http.Transport is upgraded to also speak http2), as well as support
-// the case where x/net/http2 is being used directly.
-func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool {
- // If our Transport wasn't made via ConfigureTransport, always
- // trace the GetConn hook if provided, because that means the
- // http2 package is being used directly and it's the one
- // dialing, as opposed to net/http.
- if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok {
- return true
- }
- // Otherwise, only use the GetConn hook if this connection has
- // been used previously for other requests. For fresh
- // connections, the net/http package does the dialing.
- return !st.freshConn
-}
-
func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
+ // TODO(dneil): Dial a new connection when t.DisableKeepAlives is set?
if isConnectionCloseRequest(req) && dialOnMiss {
// It gets its own connection.
traceGetConn(req, addr)
@@ -89,10 +75,14 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
for {
p.mu.Lock()
for _, cc := range p.conns[addr] {
- if st := cc.idleState(); st.canTakeNewRequest {
- if p.shouldTraceGetConn(st) {
+ if cc.ReserveNewRequest() {
+ // When a connection is presented to us by the net/http package,
+ // the GetConn hook has already been called.
+ // Don't call it a second time here.
+ if !cc.getConnCalled {
traceGetConn(req, addr)
}
+ cc.getConnCalled = false
p.mu.Unlock()
return cc, nil
}
@@ -108,7 +98,13 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
if shouldRetryDial(call, req) {
continue
}
- return call.res, call.err
+ cc, err := call.res, call.err
+ if err != nil {
+ return nil, err
+ }
+ if cc.ReserveNewRequest() {
+ return cc, nil
+ }
}
}
@@ -205,6 +201,7 @@ func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
if err != nil {
c.err = err
} else {
+ cc.getConnCalled = true // already called by the net/http package
p.addConnLocked(key, cc)
}
delete(p.addConnCalls, key)
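The new GetClientConn contract above means a conn returned by the pool already carries a stream reservation. A hypothetical ClientConnPool wrapper honoring that contract, including the bogus-new(http.Request) release described in the doc comment; the load threshold and inner-pool wiring are illustrative:

package main

import (
	"errors"
	"net/http"

	"golang.org/x/net/http2"
)

// capPool is a hypothetical wrapper that refuses connections it
// considers overloaded.
type capPool struct {
	inner http2.ClientConnPool
}

func (p *capPool) GetClientConn(req *http.Request, addr string) (*http2.ClientConn, error) {
	cc, err := p.inner.GetClientConn(req, addr)
	if err != nil {
		return nil, err
	}
	st := cc.State()
	if st.StreamsActive+st.StreamsReserved > 50 {
		// Per the contract above, the returned conn already accounts
		// for an upcoming RoundTrip; release that stream reservation
		// with a bogus request before discarding the conn.
		cc.RoundTrip(new(http.Request))
		return nil, errors.New("capPool: connection overloaded")
	}
	return cc, nil
}

func (p *capPool) MarkDead(cc *http2.ClientConn) { p.inner.MarkDead(cc) }

func main() {
	_ = &http2.Transport{ConnPool: &capPool{}} // inner pool wiring omitted
}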
diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go
index 71f2c4631..2663e5d28 100644
--- a/vendor/golang.org/x/net/http2/errors.go
+++ b/vendor/golang.org/x/net/http2/errors.go
@@ -53,6 +53,13 @@ func (e ErrCode) String() string {
return fmt.Sprintf("unknown error code 0x%x", uint32(e))
}
+func (e ErrCode) stringToken() string {
+ if s, ok := errCodeName[e]; ok {
+ return s
+ }
+ return fmt.Sprintf("ERR_UNKNOWN_%d", uint32(e))
+}
+
// ConnectionError is an error that results in the termination of the
// entire connection.
type ConnectionError ErrCode
@@ -67,6 +74,11 @@ type StreamError struct {
Cause error // optional additional detail
}
+// errFromPeer is a sentinel error value for StreamError.Cause to
+// indicate that the StreamError was sent from the peer over the wire
+// and wasn't locally generated in the Transport.
+var errFromPeer = errors.New("received from peer")
+
func streamError(id uint32, code ErrCode) StreamError {
return StreamError{StreamID: id, Code: code}
}
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index 514c126c5..96a747905 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -122,7 +122,7 @@ var flagName = map[FrameType]map[Flags]string{
// a frameParser parses a frame given its FrameHeader and payload
// bytes. The length of payload will always equal fh.Length (which
// might be 0).
-type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error)
+type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error)
var frameParsers = map[FrameType]frameParser{
FrameData: parseDataFrame,
@@ -267,6 +267,11 @@ type Framer struct {
lastFrame Frame
errDetail error
+ // countError is a non-nil func that's called on a frame parse
+ // error with some unique error path token. It's initialized
+ // from Transport.CountError or Server.CountError.
+ countError func(errToken string)
+
// lastHeaderStream is non-zero if the last frame was an
// unfinished HEADERS/CONTINUATION.
lastHeaderStream uint32
@@ -426,6 +431,7 @@ func NewFramer(w io.Writer, r io.Reader) *Framer {
fr := &Framer{
w: w,
r: r,
+ countError: func(string) {},
logReads: logFrameReads,
logWrites: logFrameWrites,
debugReadLoggerf: log.Printf,
@@ -500,7 +506,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
if _, err := io.ReadFull(fr.r, payload); err != nil {
return nil, err
}
- f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload)
+ f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload)
if err != nil {
if ce, ok := err.(connError); ok {
return nil, fr.connError(ce.Code, ce.Reason)
@@ -588,13 +594,14 @@ func (f *DataFrame) Data() []byte {
return f.data
}
-func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
+func parseDataFrame(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) {
if fh.StreamID == 0 {
// DATA frames MUST be associated with a stream. If a
// DATA frame is received whose stream identifier
// field is 0x0, the recipient MUST respond with a
// connection error (Section 5.4.1) of type
// PROTOCOL_ERROR.
+ countError("frame_data_stream_0")
return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
}
f := fc.getDataFrame()
@@ -605,6 +612,7 @@ func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, erro
var err error
payload, padSize, err = readByte(payload)
if err != nil {
+ countError("frame_data_pad_byte_short")
return nil, err
}
}
@@ -613,6 +621,7 @@ func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, erro
// length of the frame payload, the recipient MUST
// treat this as a connection error.
// Filed: https://github.com/http2/http2-spec/issues/610
+ countError("frame_data_pad_too_big")
return nil, connError{ErrCodeProtocol, "pad size larger than data payload"}
}
f.data = payload[:len(payload)-int(padSize)]
@@ -695,7 +704,7 @@ type SettingsFrame struct {
p []byte
}
-func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseSettingsFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
// When this (ACK 0x1) bit is set, the payload of the
// SETTINGS frame MUST be empty. Receipt of a
@@ -703,6 +712,7 @@ func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error)
// field value other than 0 MUST be treated as a
// connection error (Section 5.4.1) of type
// FRAME_SIZE_ERROR.
+ countError("frame_settings_ack_with_length")
return nil, ConnectionError(ErrCodeFrameSize)
}
if fh.StreamID != 0 {
@@ -713,14 +723,17 @@ func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error)
// field is anything other than 0x0, the endpoint MUST
// respond with a connection error (Section 5.4.1) of
// type PROTOCOL_ERROR.
+ countError("frame_settings_has_stream")
return nil, ConnectionError(ErrCodeProtocol)
}
if len(p)%6 != 0 {
+ countError("frame_settings_mod_6")
// Expecting even number of 6 byte settings.
return nil, ConnectionError(ErrCodeFrameSize)
}
f := &SettingsFrame{FrameHeader: fh, p: p}
if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 {
+ countError("frame_settings_window_size_too_big")
// Values above the maximum flow control window size of 2^31 - 1 MUST
// be treated as a connection error (Section 5.4.1) of type
// FLOW_CONTROL_ERROR.
@@ -832,11 +845,13 @@ type PingFrame struct {
func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
-func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
+func parsePingFrame(_ *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) {
if len(payload) != 8 {
+ countError("frame_ping_length")
return nil, ConnectionError(ErrCodeFrameSize)
}
if fh.StreamID != 0 {
+ countError("frame_ping_has_stream")
return nil, ConnectionError(ErrCodeProtocol)
}
f := &PingFrame{FrameHeader: fh}
@@ -872,11 +887,13 @@ func (f *GoAwayFrame) DebugData() []byte {
return f.debugData
}
-func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseGoAwayFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
if fh.StreamID != 0 {
+ countError("frame_goaway_has_stream")
return nil, ConnectionError(ErrCodeProtocol)
}
if len(p) < 8 {
+ countError("frame_goaway_short")
return nil, ConnectionError(ErrCodeFrameSize)
}
return &GoAwayFrame{
@@ -912,7 +929,7 @@ func (f *UnknownFrame) Payload() []byte {
return f.p
}
-func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseUnknownFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
return &UnknownFrame{fh, p}, nil
}
@@ -923,8 +940,9 @@ type WindowUpdateFrame struct {
Increment uint32 // never read with high bit set
}
-func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
if len(p) != 4 {
+ countError("frame_windowupdate_bad_len")
return nil, ConnectionError(ErrCodeFrameSize)
}
inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
@@ -936,8 +954,10 @@ func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, err
// control window MUST be treated as a connection
// error (Section 5.4.1).
if fh.StreamID == 0 {
+ countError("frame_windowupdate_zero_inc_conn")
return nil, ConnectionError(ErrCodeProtocol)
}
+ countError("frame_windowupdate_zero_inc_stream")
return nil, streamError(fh.StreamID, ErrCodeProtocol)
}
return &WindowUpdateFrame{
@@ -988,7 +1008,7 @@ func (f *HeadersFrame) HasPriority() bool {
return f.FrameHeader.Flags.Has(FlagHeadersPriority)
}
-func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
+func parseHeadersFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (_ Frame, err error) {
hf := &HeadersFrame{
FrameHeader: fh,
}
@@ -997,11 +1017,13 @@ func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err er
// is received whose stream identifier field is 0x0, the recipient MUST
// respond with a connection error (Section 5.4.1) of type
// PROTOCOL_ERROR.
+ countError("frame_headers_zero_stream")
return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"}
}
var padLength uint8
if fh.Flags.Has(FlagHeadersPadded) {
if p, padLength, err = readByte(p); err != nil {
+ countError("frame_headers_pad_short")
return
}
}
@@ -1009,16 +1031,19 @@ func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err er
var v uint32
p, v, err = readUint32(p)
if err != nil {
+ countError("frame_headers_prio_short")
return nil, err
}
hf.Priority.StreamDep = v & 0x7fffffff
hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
p, hf.Priority.Weight, err = readByte(p)
if err != nil {
+ countError("frame_headers_prio_weight_short")
return nil, err
}
}
- if len(p)-int(padLength) <= 0 {
+ if len(p)-int(padLength) < 0 {
+ countError("frame_headers_pad_too_big")
return nil, streamError(fh.StreamID, ErrCodeProtocol)
}
hf.headerFragBuf = p[:len(p)-int(padLength)]
@@ -1125,11 +1150,13 @@ func (p PriorityParam) IsZero() bool {
return p == PriorityParam{}
}
-func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
+func parsePriorityFrame(_ *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) {
if fh.StreamID == 0 {
+ countError("frame_priority_zero_stream")
return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
}
if len(payload) != 5 {
+ countError("frame_priority_bad_length")
return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
}
v := binary.BigEndian.Uint32(payload[:4])
@@ -1172,11 +1199,13 @@ type RSTStreamFrame struct {
ErrCode ErrCode
}
-func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
if len(p) != 4 {
+ countError("frame_rststream_bad_len")
return nil, ConnectionError(ErrCodeFrameSize)
}
if fh.StreamID == 0 {
+ countError("frame_rststream_zero_stream")
return nil, ConnectionError(ErrCodeProtocol)
}
return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil
@@ -1202,8 +1231,9 @@ type ContinuationFrame struct {
headerFragBuf []byte
}
-func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseContinuationFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
if fh.StreamID == 0 {
+ countError("frame_continuation_zero_stream")
return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
}
return &ContinuationFrame{fh, p}, nil
@@ -1252,7 +1282,7 @@ func (f *PushPromiseFrame) HeadersEnded() bool {
return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
}
-func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
+func parsePushPromise(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (_ Frame, err error) {
pp := &PushPromiseFrame{
FrameHeader: fh,
}
@@ -1263,6 +1293,7 @@ func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err err
// with. If the stream identifier field specifies the value
// 0x0, a recipient MUST respond with a connection error
// (Section 5.4.1) of type PROTOCOL_ERROR.
+ countError("frame_pushpromise_zero_stream")
return nil, ConnectionError(ErrCodeProtocol)
}
// The PUSH_PROMISE frame includes optional padding.
@@ -1270,18 +1301,21 @@ func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err err
var padLength uint8
if fh.Flags.Has(FlagPushPromisePadded) {
if p, padLength, err = readByte(p); err != nil {
+ countError("frame_pushpromise_pad_short")
return
}
}
p, pp.PromiseID, err = readUint32(p)
if err != nil {
+ countError("frame_pushpromise_promiseid_short")
return
}
pp.PromiseID = pp.PromiseID & (1<<31 - 1)
if int(padLength) > len(p) {
// like the DATA frame, error out if padding is longer than the body.
+ countError("frame_pushpromise_pad_too_big")
return nil, ConnectionError(ErrCodeProtocol)
}
pp.headerFragBuf = p[:len(p)-int(padLength)]
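The countError tokens added throughout this file ("frame_ping_length", "frame_goaway_short", ...) reach applications only through Transport.CountError and Server.CountError, wired later in this diff. A minimal stand-alone aggregator with the same func(errToken string) shape:

package main

import (
	"fmt"
	"sync"
)

// errCounter is a hypothetical aggregator matching the countError
// callback shape used by the Framer.
type errCounter struct {
	mu sync.Mutex
	n  map[string]int
}

func (c *errCounter) count(errToken string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.n == nil {
		c.n = make(map[string]int)
	}
	c.n[errToken]++
}

func main() {
	c := &errCounter{}
	// Tokens like these are emitted by the parsers above on malformed frames.
	c.count("frame_ping_length")
	c.count("frame_goaway_short")
	c.count("frame_ping_length")
	fmt.Println(c.n) // map[frame_goaway_short:1 frame_ping_length:2]
}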
diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go
index a1ab2f056..fe0b84ccd 100644
--- a/vendor/golang.org/x/net/http2/hpack/huffman.go
+++ b/vendor/golang.org/x/net/http2/hpack/huffman.go
@@ -140,25 +140,29 @@ func buildRootHuffmanNode() {
panic("unexpected size")
}
lazyRootHuffmanNode = newInternalNode()
- for i, code := range huffmanCodes {
- addDecoderNode(byte(i), code, huffmanCodeLen[i])
- }
-}
+ // allocate a leaf node for each of the 256 symbols
+ leaves := new([256]node)
+
+ for sym, code := range huffmanCodes {
+ codeLen := huffmanCodeLen[sym]
+
+ cur := lazyRootHuffmanNode
+ for codeLen > 8 {
+ codeLen -= 8
+ i := uint8(code >> codeLen)
+ if cur.children[i] == nil {
+ cur.children[i] = newInternalNode()
+ }
+ cur = cur.children[i]
+ }
+ shift := 8 - codeLen
+ start, end := int(uint8(code<<shift)), int(1<<shift)
-func addDecoderNode(sym byte, code uint32, codeLen uint8) {
- cur := lazyRootHuffmanNode
- for codeLen > 8 {
- codeLen -= 8
- i := uint8(code >> codeLen)
- if cur.children[i] == nil {
- cur.children[i] = newInternalNode()
+ leaves[sym].sym = byte(sym)
+ leaves[sym].codeLen = codeLen
+ for i := start; i < start+end; i++ {
+ cur.children[i] = &leaves[sym]
}
- cur = cur.children[i]
- }
- shift := 8 - codeLen
- start, end := int(uint8(code<<shift)), int(1<<shift)
- for i := start; i < start+end; i++ {
- cur.children[i] = &node{sym: sym, codeLen: codeLen}
}
}
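The rewrite folds addDecoderNode into buildRootHuffmanNode and backs all 256 leaves with a single array allocation instead of one heap allocation per symbol. An illustrative measurement of that pattern in isolation (not the hpack code itself; exact counts may vary by compiler version):

package main

import (
	"fmt"
	"testing"
)

type node struct {
	sym     byte
	codeLen uint8
}

var sink []*node // keep results live so allocations aren't optimized away

func main() {
	perLeaf := testing.AllocsPerRun(100, func() {
		m := make([]*node, 256)
		for i := range m {
			m[i] = &node{sym: byte(i)} // one heap allocation per symbol
		}
		sink = m
	})
	oneArray := testing.AllocsPerRun(100, func() {
		leaves := new([256]node) // single allocation for all symbols
		m := make([]*node, 256)
		for i := range m {
			leaves[i].sym = byte(i)
			m[i] = &leaves[i]
		}
		sink = m
	})
	// Typically prints something like: per-leaf 257, one array 2.
	fmt.Printf("per-leaf %.0f, one array %.0f\n", perLeaf, oneArray)
}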
diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go
index 2a5399ec4..c15b8a771 100644
--- a/vendor/golang.org/x/net/http2/pipe.go
+++ b/vendor/golang.org/x/net/http2/pipe.go
@@ -30,6 +30,17 @@ type pipeBuffer interface {
io.Reader
}
+// setBuffer initializes the pipe buffer.
+// It has no effect if the pipe is already closed.
+func (p *pipe) setBuffer(b pipeBuffer) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.err != nil || p.breakErr != nil {
+ return
+ }
+ p.b = b
+}
+
func (p *pipe) Len() int {
p.mu.Lock()
defer p.mu.Unlock()
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 19e449cfa..c67e9b7f5 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -130,6 +130,12 @@ type Server struct {
// If nil, a default scheduler is chosen.
NewWriteScheduler func() WriteScheduler
+ // CountError, if non-nil, is called on HTTP/2 server errors.
+ // It's intended to increment a metric for monitoring, such
+ // as an expvar or Prometheus metric.
+ // The errType consists of only ASCII word characters.
+ CountError func(errType string)
+
// Internal state. This is a pointer (rather than embedded directly)
// so that we don't embed a Mutex in this struct, which will make the
// struct non-copyable, which might break some callers.
@@ -405,6 +411,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
fr := NewFramer(sc.bw, c)
+ if s.CountError != nil {
+ fr.countError = s.CountError
+ }
fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
fr.MaxHeaderListSize = sc.maxHeaderListSize()
fr.SetMaxReadFrameSize(s.maxReadFrameSize())
@@ -1399,7 +1408,7 @@ func (sc *serverConn) processFrame(f Frame) error {
// First frame received must be SETTINGS.
if !sc.sawFirstSettings {
if _, ok := f.(*SettingsFrame); !ok {
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("first_settings", ConnectionError(ErrCodeProtocol))
}
sc.sawFirstSettings = true
}
@@ -1424,7 +1433,7 @@ func (sc *serverConn) processFrame(f Frame) error {
case *PushPromiseFrame:
// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("push_promise", ConnectionError(ErrCodeProtocol))
default:
sc.vlogf("http2: server ignoring frame: %v", f.Header())
return nil
@@ -1444,7 +1453,7 @@ func (sc *serverConn) processPing(f *PingFrame) error {
// identifier field value other than 0x0, the recipient MUST
// respond with a connection error (Section 5.4.1) of type
// PROTOCOL_ERROR."
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("ping_on_stream", ConnectionError(ErrCodeProtocol))
}
if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
return nil
@@ -1463,7 +1472,7 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
// or PRIORITY on a stream in this state MUST be
// treated as a connection error (Section 5.4.1) of
// type PROTOCOL_ERROR."
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("stream_idle", ConnectionError(ErrCodeProtocol))
}
if st == nil {
// "WINDOW_UPDATE can be sent by a peer that has sent a
@@ -1474,7 +1483,7 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
return nil
}
if !st.flow.add(int32(f.Increment)) {
- return streamError(f.StreamID, ErrCodeFlowControl)
+ return sc.countError("bad_flow", streamError(f.StreamID, ErrCodeFlowControl))
}
default: // connection-level flow control
if !sc.flow.add(int32(f.Increment)) {
@@ -1495,7 +1504,7 @@ func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
// identifying an idle stream is received, the
// recipient MUST treat this as a connection error
// (Section 5.4.1) of type PROTOCOL_ERROR.
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("reset_idle_stream", ConnectionError(ErrCodeProtocol))
}
if st != nil {
st.cancelCtx()
@@ -1547,7 +1556,7 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error {
// Why is the peer ACKing settings we never sent?
// The spec doesn't mention this case, but
// hang up on them anyway.
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("ack_mystery", ConnectionError(ErrCodeProtocol))
}
return nil
}
@@ -1555,7 +1564,7 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error {
// This isn't actually in the spec, but hang up on
// suspiciously large settings frames or those with
// duplicate entries.
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("settings_big_or_dups", ConnectionError(ErrCodeProtocol))
}
if err := f.ForeachSetting(sc.processSetting); err != nil {
return err
@@ -1622,7 +1631,7 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
// control window to exceed the maximum size as a
// connection error (Section 5.4.1) of type
// FLOW_CONTROL_ERROR."
- return ConnectionError(ErrCodeFlowControl)
+ return sc.countError("setting_win_size", ConnectionError(ErrCodeFlowControl))
}
}
return nil
@@ -1655,7 +1664,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
// or PRIORITY on a stream in this state MUST be
// treated as a connection error (Section 5.4.1) of
// type PROTOCOL_ERROR."
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("data_on_idle", ConnectionError(ErrCodeProtocol))
}
// "If a DATA frame is received whose stream is not in "open"
@@ -1672,7 +1681,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
// and return any flow control bytes since we're not going
// to consume them.
if sc.inflow.available() < int32(f.Length) {
- return streamError(id, ErrCodeFlowControl)
+ return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
}
// Deduct the flow control from inflow, since we're
// going to immediately add it back in
@@ -1685,7 +1694,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
// Already have a stream error in flight. Don't send another.
return nil
}
- return streamError(id, ErrCodeStreamClosed)
+ return sc.countError("closed", streamError(id, ErrCodeStreamClosed))
}
if st.body == nil {
panic("internal error: should have a body in this state")
@@ -1697,12 +1706,12 @@ func (sc *serverConn) processData(f *DataFrame) error {
// RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
// value of a content-length header field does not equal the sum of the
// DATA frame payload lengths that form the body.
- return streamError(id, ErrCodeProtocol)
+ return sc.countError("send_too_much", streamError(id, ErrCodeProtocol))
}
if f.Length > 0 {
// Check whether the client has flow control quota.
if st.inflow.available() < int32(f.Length) {
- return streamError(id, ErrCodeFlowControl)
+ return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl))
}
st.inflow.take(int32(f.Length))
@@ -1710,7 +1719,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
wrote, err := st.body.Write(data)
if err != nil {
sc.sendWindowUpdate(nil, int(f.Length)-wrote)
- return streamError(id, ErrCodeStreamClosed)
+ return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed))
}
if wrote != len(data) {
panic("internal error: bad Writer")
@@ -1796,7 +1805,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// stream identifier MUST respond with a connection error
// (Section 5.4.1) of type PROTOCOL_ERROR.
if id%2 != 1 {
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("headers_even", ConnectionError(ErrCodeProtocol))
}
// A HEADERS frame can be used to create a new stream or
// send a trailer for an open one. If we already have a stream
@@ -1813,7 +1822,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// this state, it MUST respond with a stream error (Section 5.4.2) of
// type STREAM_CLOSED.
if st.state == stateHalfClosedRemote {
- return streamError(id, ErrCodeStreamClosed)
+ return sc.countError("headers_half_closed", streamError(id, ErrCodeStreamClosed))
}
return st.processTrailerHeaders(f)
}
@@ -1824,7 +1833,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// receives an unexpected stream identifier MUST respond with
// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
if id <= sc.maxClientStreamID {
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("stream_went_down", ConnectionError(ErrCodeProtocol))
}
sc.maxClientStreamID = id
@@ -1841,14 +1850,14 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
if sc.curClientStreams+1 > sc.advMaxStreams {
if sc.unackedSettings == 0 {
// They should know better.
- return streamError(id, ErrCodeProtocol)
+ return sc.countError("over_max_streams", streamError(id, ErrCodeProtocol))
}
// Assume it's a network race, where they just haven't
// received our last SETTINGS update. But actually
// this can't happen yet, because we don't yet provide
// a way for users to adjust server parameters at
// runtime.
- return streamError(id, ErrCodeRefusedStream)
+ return sc.countError("over_max_streams_race", streamError(id, ErrCodeRefusedStream))
}
initialState := stateOpen
@@ -1858,7 +1867,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
st := sc.newStream(id, 0, initialState)
if f.HasPriority() {
- if err := checkPriority(f.StreamID, f.Priority); err != nil {
+ if err := sc.checkPriority(f.StreamID, f.Priority); err != nil {
return err
}
sc.writeSched.AdjustStream(st.id, f.Priority)
@@ -1902,15 +1911,15 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
sc := st.sc
sc.serveG.check()
if st.gotTrailerHeader {
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("dup_trailers", ConnectionError(ErrCodeProtocol))
}
st.gotTrailerHeader = true
if !f.StreamEnded() {
- return streamError(st.id, ErrCodeProtocol)
+ return sc.countError("trailers_not_ended", streamError(st.id, ErrCodeProtocol))
}
if len(f.PseudoFields()) > 0 {
- return streamError(st.id, ErrCodeProtocol)
+ return sc.countError("trailers_pseudo", streamError(st.id, ErrCodeProtocol))
}
if st.trailer != nil {
for _, hf := range f.RegularFields() {
@@ -1919,7 +1928,7 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
// TODO: send more details to the peer somehow. But http2 has
// no way to send debug data at a stream level. Discuss with
// HTTP folk.
- return streamError(st.id, ErrCodeProtocol)
+ return sc.countError("trailers_bogus", streamError(st.id, ErrCodeProtocol))
}
st.trailer[key] = append(st.trailer[key], hf.Value)
}
@@ -1928,13 +1937,13 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
return nil
}
-func checkPriority(streamID uint32, p PriorityParam) error {
+func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error {
if streamID == p.StreamDep {
// Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
// Section 5.3.3 says that a stream can depend on one of its dependencies,
// so it's only self-dependencies that are forbidden.
- return streamError(streamID, ErrCodeProtocol)
+ return sc.countError("priority", streamError(streamID, ErrCodeProtocol))
}
return nil
}
@@ -1943,7 +1952,7 @@ func (sc *serverConn) processPriority(f *PriorityFrame) error {
if sc.inGoAway {
return nil
}
- if err := checkPriority(f.StreamID, f.PriorityParam); err != nil {
+ if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil {
return err
}
sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam)
@@ -2000,7 +2009,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
isConnect := rp.method == "CONNECT"
if isConnect {
if rp.path != "" || rp.scheme != "" || rp.authority == "" {
- return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+ return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
}
} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
// See 8.1.2.6 Malformed Requests and Responses:
@@ -2013,13 +2022,13 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
// "All HTTP/2 requests MUST include exactly one valid
// value for the :method, :scheme, and :path
// pseudo-header fields"
- return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+ return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
}
bodyOpen := !f.StreamEnded()
if rp.method == "HEAD" && bodyOpen {
// HEAD requests can't have bodies
- return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+ return nil, nil, sc.countError("head_body", streamError(f.StreamID, ErrCodeProtocol))
}
rp.header = make(http.Header)
@@ -2102,7 +2111,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
var err error
url_, err = url.ParseRequestURI(rp.path)
if err != nil {
- return nil, nil, streamError(st.id, ErrCodeProtocol)
+ return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol))
}
requestURI = rp.path
}
@@ -2985,3 +2994,31 @@ func h1ServerKeepAlivesDisabled(hs *http.Server) bool {
}
return false
}
+
+func (sc *serverConn) countError(name string, err error) error {
+ if sc == nil || sc.srv == nil {
+ return err
+ }
+ f := sc.srv.CountError
+ if f == nil {
+ return err
+ }
+ var typ string
+ var code ErrCode
+ switch e := err.(type) {
+ case ConnectionError:
+ typ = "conn"
+ code = ErrCode(e)
+ case StreamError:
+ typ = "stream"
+ code = ErrCode(e.Code)
+ default:
+ return err
+ }
+ codeStr := errCodeName[code]
+ if codeStr == "" {
+ codeStr = strconv.Itoa(int(code))
+ }
+ f(fmt.Sprintf("%s_%s_%s", typ, codeStr, name))
+ return err
+}
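Given the format string above, server-side tokens look like "conn_PROTOCOL_ERROR_push_promise" or "stream_STREAM_CLOSED_headers_half_closed", alongside the raw frame-parse tokens from the Framer. A sketch of wiring the hook with expvar; the map name and certificate paths are placeholders:

package main

import (
	"expvar"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	errs := expvar.NewMap("http2_server_errors")
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	h2 := &http2.Server{
		// Increment a counter per error token for monitoring.
		CountError: func(errType string) { errs.Add(errType, 1) },
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}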
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index b261beb1d..6d6f88589 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -24,6 +24,7 @@ import (
"net/http"
"net/http/httptrace"
"net/textproto"
+ "os"
"sort"
"strconv"
"strings"
@@ -51,6 +52,15 @@ const (
transportDefaultStreamMinRefresh = 4 << 10
defaultUserAgent = "Go-http-client/2.0"
+
+ // initialMaxConcurrentStreams is a connection's maxConcurrentStreams until
+ // it has received the server's initial SETTINGS frame, which corresponds with the
+ // spec's minimum recommended value.
+ initialMaxConcurrentStreams = 100
+
+ // defaultMaxConcurrentStreams is a connection's default maxConcurrentStreams
+ // if the server doesn't include one in its initial SETTINGS frame.
+ defaultMaxConcurrentStreams = 1000
)
// Transport is an HTTP/2 Transport.
@@ -121,6 +131,17 @@ type Transport struct {
// Defaults to 15s.
PingTimeout time.Duration
+ // WriteByteTimeout is the timeout after which the connection will be
+ // closed if no data can be written to it. The timeout begins when data is
+ // available to write, and is extended whenever any bytes are written.
+ WriteByteTimeout time.Duration
+
+ // CountError, if non-nil, is called on HTTP/2 transport errors.
+ // It's intended to increment a metric for monitoring, such
+ // as an expvar or Prometheus metric.
+ // The errType consists of only ASCII word characters.
+ CountError func(errType string)
+
// t1, if non-nil, is the standard library Transport using
// this transport. Its settings are used (but not its
// RoundTrip method, etc).
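A sketch of configuring the two new Transport fields; the timeout value and logging sink are illustrative:

package main

import (
	"log"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	t := &http2.Transport{
		// Close the connection if a pending write makes no progress
		// within 15s; progress extends the deadline (see stickyErrWriter
		// later in this diff).
		WriteByteTimeout: 15 * time.Second,
		CountError: func(errType string) {
			log.Printf("http2 transport error: %s", errType)
		},
	}
	_ = t
}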
@@ -227,11 +248,12 @@ func (t *Transport) initConnPool() {
// ClientConn is the state of a single HTTP/2 client connection to an
// HTTP/2 server.
type ClientConn struct {
- t *Transport
- tconn net.Conn // usually *tls.Conn, except specialized impls
- tlsState *tls.ConnectionState // nil only for specialized impls
- reused uint32 // whether conn is being reused; atomic
- singleUse bool // whether being used for a single http.Request
+ t *Transport
+ tconn net.Conn // usually *tls.Conn, except specialized impls
+ tlsState *tls.ConnectionState // nil only for specialized impls
+ reused uint32 // whether conn is being reused; atomic
+ singleUse bool // whether being used for a single http.Request
+ getConnCalled bool // used by clientConnPool
// readLoop goroutine fields:
readerDone chan struct{} // closed on error
@@ -244,86 +266,94 @@ type ClientConn struct {
cond *sync.Cond // hold mu; broadcast on flow/closed changes
flow flow // our conn-level flow control quota (cs.flow is per stream)
inflow flow // peer's conn-level flow control
+ doNotReuse bool // whether conn is marked to not be reused for any future requests
closing bool
closed bool
+ seenSettings bool // true if we've seen a settings frame, false otherwise
wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
goAwayDebug string // goAway frame's debug data, retained as a string
streams map[uint32]*clientStream // client-initiated
+ streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip
nextStreamID uint32
pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
pings map[[8]byte]chan struct{} // in flight ping data to notification channel
- bw *bufio.Writer
br *bufio.Reader
- fr *Framer
lastActive time.Time
lastIdle time.Time // time last idle
- // Settings from peer: (also guarded by mu)
+ // Settings from peer: (also guarded by wmu)
maxFrameSize uint32
maxConcurrentStreams uint32
peerMaxHeaderListSize uint64
initialWindowSize uint32
+ // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
+ // Write to reqHeaderMu to lock it, read from it to unlock.
+ // Lock reqHeaderMu BEFORE mu or wmu.
+ reqHeaderMu chan struct{}
+
+ // wmu is held while writing.
+ // Acquire BEFORE mu when holding both, to avoid blocking mu on network writes.
+ // Only acquire both at the same time when changing peer settings.
+ wmu sync.Mutex
+ bw *bufio.Writer
+ fr *Framer
+ werr error // first write error that has occurred
hbuf bytes.Buffer // HPACK encoder writes into this
henc *hpack.Encoder
-
- wmu sync.Mutex // held while writing; acquire AFTER mu if holding both
- werr error // first write error that has occurred
}
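reqHeaderMu is a channel rather than a sync.Mutex so that acquisition can be abandoned mid-wait when the request is canceled, as writeRequest does later in this diff. The pattern in isolation:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	sem := make(chan struct{}, 1) // 1-element semaphore: send = lock, receive = unlock

	sem <- struct{}{} // hold the lock so the acquirer below has to wait

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()

	select {
	case sem <- struct{}{}:
		defer func() { <-sem }() // unlock
		fmt.Println("acquired")
	case <-ctx.Done():
		fmt.Println("gave up waiting:", ctx.Err()) // this branch runs
	}
}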
// clientStream is the state for a single HTTP/2 stream. One of these
// is created for each Transport.RoundTrip call.
type clientStream struct {
- cc *ClientConn
- req *http.Request
+ cc *ClientConn
+
+ // Fields of Request that we may access even after the response body is closed.
+ ctx context.Context
+ reqCancel <-chan struct{}
+
trace *httptrace.ClientTrace // or nil
ID uint32
- resc chan resAndError
bufPipe pipe // buffered pipe with the flow-controlled response payload
- startedWrite bool // started request body write; guarded by cc.mu
requestedGzip bool
- on100 func() // optional code to run if get a 100 continue response
+ isHead bool
+
+ abortOnce sync.Once
+ abort chan struct{} // closed to signal stream should end immediately
+ abortErr error // set if abort is closed
+
+ peerClosed chan struct{} // closed when the peer sends an END_STREAM flag
+ donec chan struct{} // closed after the stream is in the closed state
+ on100 chan struct{} // buffered; written to if a 100 is received
+
+ respHeaderRecv chan struct{} // closed when headers are received
+ res *http.Response // set if respHeaderRecv is closed
flow flow // guarded by cc.mu
inflow flow // guarded by cc.mu
bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
readErr error // sticky read error; owned by transportResponseBody.Read
- stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
- didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu
- peerReset chan struct{} // closed on peer reset
- resetErr error // populated before peerReset is closed
+ reqBody io.ReadCloser
+ reqBodyContentLength int64 // -1 means unknown
+ reqBodyClosed bool // body has been closed; guarded by cc.mu
- done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu
+ // owned by writeRequest:
+ sentEndStream bool // sent an END_STREAM flag to the peer
+ sentHeaders bool
// owned by clientConnReadLoop:
firstByte bool // got the first response byte
pastHeaders bool // got first MetaHeadersFrame (actual headers)
pastTrailers bool // got optional second MetaHeadersFrame (trailers)
num1xx uint8 // number of 1xx responses seen
+ readClosed bool // peer sent an END_STREAM flag
+ readAborted bool // read loop reset the stream
trailer http.Header // accumulated trailers
resTrailer *http.Header // client's Response.Trailer
}
-// awaitRequestCancel waits for the user to cancel a request or for the done
-// channel to be signaled. A non-nil error is returned only if the request was
-// canceled.
-func awaitRequestCancel(req *http.Request, done <-chan struct{}) error {
- ctx := req.Context()
- if req.Cancel == nil && ctx.Done() == nil {
- return nil
- }
- select {
- case <-req.Cancel:
- return errRequestCanceled
- case <-ctx.Done():
- return ctx.Err()
- case <-done:
- return nil
- }
-}
-
var got1xxFuncForTests func(int, textproto.MIMEHeader) error
// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func,
@@ -335,78 +365,65 @@ func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error
return traceGot1xxResponseFunc(cs.trace)
}
-// awaitRequestCancel waits for the user to cancel a request, its context to
-// expire, or for the request to be done (any way it might be removed from the
-// cc.streams map: peer reset, successful completion, TCP connection breakage,
-// etc). If the request is canceled, then cs will be canceled and closed.
-func (cs *clientStream) awaitRequestCancel(req *http.Request) {
- if err := awaitRequestCancel(req, cs.done); err != nil {
- cs.cancelStream()
- cs.bufPipe.CloseWithError(err)
- }
+func (cs *clientStream) abortStream(err error) {
+ cs.cc.mu.Lock()
+ defer cs.cc.mu.Unlock()
+ cs.abortStreamLocked(err)
}
-func (cs *clientStream) cancelStream() {
- cc := cs.cc
- cc.mu.Lock()
- didReset := cs.didReset
- cs.didReset = true
- cc.mu.Unlock()
-
- if !didReset {
- cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
- cc.forgetStreamID(cs.ID)
+func (cs *clientStream) abortStreamLocked(err error) {
+ cs.abortOnce.Do(func() {
+ cs.abortErr = err
+ close(cs.abort)
+ })
+ if cs.reqBody != nil && !cs.reqBodyClosed {
+ cs.reqBody.Close()
+ cs.reqBodyClosed = true
}
-}
-
-// checkResetOrDone reports any error sent in a RST_STREAM frame by the
-// server, or errStreamClosed if the stream is complete.
-func (cs *clientStream) checkResetOrDone() error {
- select {
- case <-cs.peerReset:
- return cs.resetErr
- case <-cs.done:
- return errStreamClosed
- default:
- return nil
+ // TODO(dneil): Clean up tests where cs.cc.cond is nil.
+ if cs.cc.cond != nil {
+ // Wake up writeRequestBody if it is waiting on flow control.
+ cs.cc.cond.Broadcast()
}
}
-func (cs *clientStream) getStartedWrite() bool {
+func (cs *clientStream) abortRequestBodyWrite() {
cc := cs.cc
cc.mu.Lock()
defer cc.mu.Unlock()
- return cs.startedWrite
-}
-
-func (cs *clientStream) abortRequestBodyWrite(err error) {
- if err == nil {
- panic("nil error")
- }
- cc := cs.cc
- cc.mu.Lock()
- if cs.stopReqBody == nil {
- cs.stopReqBody = err
- if cs.req.Body != nil {
- cs.req.Body.Close()
- }
+ if cs.reqBody != nil && !cs.reqBodyClosed {
+ cs.reqBody.Close()
+ cs.reqBodyClosed = true
cc.cond.Broadcast()
}
- cc.mu.Unlock()
}
type stickyErrWriter struct {
- w io.Writer
- err *error
+ conn net.Conn
+ timeout time.Duration
+ err *error
}
func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
if *sew.err != nil {
return 0, *sew.err
}
- n, err = sew.w.Write(p)
- *sew.err = err
- return
+ for {
+ if sew.timeout != 0 {
+ sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout))
+ }
+ nn, err := sew.conn.Write(p[n:])
+ n += nn
+ if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) {
+ // Keep extending the deadline so long as we're making progress.
+ continue
+ }
+ if sew.timeout != 0 {
+ sew.conn.SetWriteDeadline(time.Time{})
+ }
+ *sew.err = err
+ return n, err
+ }
}
// noCachedConnError is the concrete type of ErrNoCachedConn, which
@@ -479,9 +496,9 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
}
reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1)
traceGotConn(req, cc, reused)
- res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req)
+ res, err := cc.RoundTrip(req)
if err != nil && retry <= 6 {
- if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil {
+ if req, err = shouldRetryRequest(req, err); err == nil {
// After the first retry, do exponential backoff with 10% jitter.
if retry == 0 {
continue
@@ -492,7 +509,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
case <-time.After(time.Second * time.Duration(backoff)):
continue
case <-req.Context().Done():
- return nil, req.Context().Err()
+ err = req.Context().Err()
}
}
}
@@ -523,7 +540,7 @@ var (
// response headers. It is always called with a non-nil error.
// It returns either a request to retry (either the same request, or a
// modified clone), or an error if the request can't be replayed.
-func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) {
+func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) {
if !canRetryError(err) {
return nil, err
}
@@ -536,7 +553,6 @@ func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*htt
// If the request body can be reset back to its original
// state via the optional req.GetBody, do that.
if req.GetBody != nil {
- // TODO: consider a req.Body.Close here? or audit that all caller paths do?
body, err := req.GetBody()
if err != nil {
return nil, err
@@ -548,10 +564,8 @@ func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*htt
// The Request.Body can't reset back to the beginning, but we
// don't seem to have started to read from it yet, so reuse
- // the request directly. The "afterBodyWrite" means the
- // bodyWrite process has started, which becomes true before
- // the first Read.
- if !afterBodyWrite {
+ // the request directly.
+ if err == errClientConnUnusable {
return req, nil
}
@@ -563,6 +577,10 @@ func canRetryError(err error) bool {
return true
}
if se, ok := err.(StreamError); ok {
+ if se.Code == ErrCodeProtocol && se.Cause == errFromPeer {
+ // See golang/go#47635, golang/go#42777
+ return true
+ }
return se.Code == ErrCodeRefusedStream
}
return false
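errFromPeer, added in errors.go above, marks PROTOCOL_ERROR stream resets that the peer sent over the wire; only those are safe to retry (golang/go#47635, golang/go#42777). A self-contained restatement of the rule, with a local sentinel standing in for the package's unexported errFromPeer:

package main

import (
	"errors"
	"fmt"

	"golang.org/x/net/http2"
)

// errFromPeer stands in for the package's unexported sentinel of the same name.
var errFromPeer = errors.New("received from peer")

func canRetry(err error) bool {
	var se http2.StreamError
	if errors.As(err, &se) {
		// Retry refused streams, and protocol errors the peer reported
		// (as opposed to ones generated locally while sending).
		return se.Code == http2.ErrCodeRefusedStream ||
			(se.Code == http2.ErrCodeProtocol && errors.Is(se.Cause, errFromPeer))
	}
	return false
}

func main() {
	peerReset := http2.StreamError{Code: http2.ErrCodeProtocol, Cause: errFromPeer}
	localErr := http2.StreamError{Code: http2.ErrCodeProtocol}
	fmt.Println(canRetry(peerReset), canRetry(localErr)) // true false
}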
@@ -637,14 +655,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
tconn: c,
readerDone: make(chan struct{}),
nextStreamID: 1,
- maxFrameSize: 16 << 10, // spec default
- initialWindowSize: 65535, // spec default
- maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough.
- peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
+ maxFrameSize: 16 << 10, // spec default
+ initialWindowSize: 65535, // spec default
+ maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
+ peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
streams: make(map[uint32]*clientStream),
singleUse: singleUse,
wantSettingsAck: true,
pings: make(map[[8]byte]chan struct{}),
+ reqHeaderMu: make(chan struct{}, 1),
}
if d := t.idleConnTimeout(); d != 0 {
cc.idleTimeout = d
@@ -659,9 +678,16 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
// TODO: adjust this writer size to account for frame size +
// MTU + crypto/tls record padding.
- cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr})
+ cc.bw = bufio.NewWriter(stickyErrWriter{
+ conn: c,
+ timeout: t.WriteByteTimeout,
+ err: &cc.werr,
+ })
cc.br = bufio.NewReader(c)
cc.fr = NewFramer(cc.bw, cc.br)
+ if t.CountError != nil {
+ cc.fr.countError = t.CountError
+ }
cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
@@ -714,6 +740,13 @@ func (cc *ClientConn) healthCheck() {
}
}
+// SetDoNotReuse marks cc as not reusable for future HTTP requests.
+func (cc *ClientConn) SetDoNotReuse() {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cc.doNotReuse = true
+}
+
func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
cc.mu.Lock()
defer cc.mu.Unlock()
@@ -731,27 +764,94 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
last := f.LastStreamID
for streamID, cs := range cc.streams {
if streamID > last {
- select {
- case cs.resc <- resAndError{err: errClientConnGotGoAway}:
- default:
- }
+ cs.abortStreamLocked(errClientConnGotGoAway)
}
}
}
// CanTakeNewRequest reports whether the connection can take a new request,
// meaning it has not been closed or received or sent a GOAWAY.
+//
+// If the caller is going to immediately make a new request on this
+// connection, use ReserveNewRequest instead.
func (cc *ClientConn) CanTakeNewRequest() bool {
cc.mu.Lock()
defer cc.mu.Unlock()
return cc.canTakeNewRequestLocked()
}
+// ReserveNewRequest is like CanTakeNewRequest but also reserves a
+// concurrent stream in cc. The reservation is decremented on the
+// next call to RoundTrip.
+func (cc *ClientConn) ReserveNewRequest() bool {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ if st := cc.idleStateLocked(); !st.canTakeNewRequest {
+ return false
+ }
+ cc.streamsReserved++
+ return true
+}
+
+// ClientConnState describes the state of a ClientConn.
+type ClientConnState struct {
+ // Closed is whether the connection is closed.
+ Closed bool
+
+ // Closing is whether the connection is in the process of
+ // closing. It may be closing due to shutdown, being a
+ // single-use connection, being marked as DoNotReuse, or
+ // having received a GOAWAY frame.
+ Closing bool
+
+ // StreamsActive is how many streams are active.
+ StreamsActive int
+
+ // StreamsReserved is how many streams have been reserved via
+ // ClientConn.ReserveNewRequest.
+ StreamsReserved int
+
+ // StreamsPending is how many requests have been sent in excess
+ // of the peer's advertised MaxConcurrentStreams setting and
+ // are waiting for other streams to complete.
+ StreamsPending int
+
+ // MaxConcurrentStreams is how many concurrent streams the
+ // peer advertised as acceptable. Zero means no SETTINGS
+ // frame has been received yet.
+ MaxConcurrentStreams uint32
+
+ // LastIdle, if non-zero, is when the connection last
+ // transitioned to idle state.
+ LastIdle time.Time
+}
+
+// State returns a snapshot of cc's state.
+func (cc *ClientConn) State() ClientConnState {
+ cc.wmu.Lock()
+ maxConcurrent := cc.maxConcurrentStreams
+ if !cc.seenSettings {
+ maxConcurrent = 0
+ }
+ cc.wmu.Unlock()
+
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return ClientConnState{
+ Closed: cc.closed,
+ Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil,
+ StreamsActive: len(cc.streams),
+ StreamsReserved: cc.streamsReserved,
+ StreamsPending: cc.pendingRequests,
+ LastIdle: cc.lastIdle,
+ MaxConcurrentStreams: maxConcurrent,
+ }
+}
+
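State and ReserveNewRequest together support connection management outside the built-in pool. A hypothetical load-shedding helper over a caller-maintained conns slice:

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

// pickConn reserves a stream on the least-loaded reusable connection.
// The caller must later issue a RoundTrip (or a bogus one) per reservation.
func pickConn(conns []*http2.ClientConn) *http2.ClientConn {
	var best *http2.ClientConn
	bestLoad := -1
	for _, cc := range conns {
		st := cc.State()
		if st.Closed || st.Closing {
			continue
		}
		load := st.StreamsActive + st.StreamsReserved + st.StreamsPending
		if best == nil || load < bestLoad {
			best, bestLoad = cc, load
		}
	}
	if best != nil && !best.ReserveNewRequest() {
		return nil // lost a race; caller can retry
	}
	return best
}

func main() {
	fmt.Println(pickConn(nil) == nil) // true with no connections
}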
// clientConnIdleState describes the suitability of a client
// connection to initiate a new RoundTrip request.
type clientConnIdleState struct {
canTakeNewRequest bool
- freshConn bool // whether it's unused by any previous request
}
func (cc *ClientConn) idleState() clientConnIdleState {
@@ -772,13 +872,13 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
// writing it.
maxConcurrentOkay = true
} else {
- maxConcurrentOkay = int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams)
+ maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams)
}
st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
+ !cc.doNotReuse &&
int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
!cc.tooIdleLocked()
- st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest
return
}
@@ -809,7 +909,7 @@ func (cc *ClientConn) onIdleTimeout() {
func (cc *ClientConn) closeIfIdle() {
cc.mu.Lock()
- if len(cc.streams) > 0 {
+ if len(cc.streams) > 0 || cc.streamsReserved > 0 {
cc.mu.Unlock()
return
}
@@ -824,9 +924,15 @@ func (cc *ClientConn) closeIfIdle() {
cc.tconn.Close()
}
+func (cc *ClientConn) isDoNotReuseAndIdle() bool {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.doNotReuse && len(cc.streams) == 0
+}
+
var shutdownEnterWaitStateHook = func() {}
-// Shutdown gracefully close the client connection, waiting for running streams to complete.
+// Shutdown gracefully closes the client connection, waiting for running streams to complete.
func (cc *ClientConn) Shutdown(ctx context.Context) error {
if err := cc.sendGoAway(); err != nil {
return err
@@ -865,15 +971,18 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
func (cc *ClientConn) sendGoAway() error {
cc.mu.Lock()
- defer cc.mu.Unlock()
- cc.wmu.Lock()
- defer cc.wmu.Unlock()
- if cc.closing {
+ closing := cc.closing
+ cc.closing = true
+ maxStreamID := cc.nextStreamID
+ cc.mu.Unlock()
+ if closing {
// GOAWAY sent already
return nil
}
+
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
// Send a graceful shutdown frame to server
- maxStreamID := cc.nextStreamID
if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil {
return err
}
@@ -881,7 +990,6 @@ func (cc *ClientConn) sendGoAway() error {
return err
}
// Prevent new requests
- cc.closing = true
return nil
}
@@ -889,17 +997,12 @@ func (cc *ClientConn) sendGoAway() error {
// err is sent to streams.
func (cc *ClientConn) closeForError(err error) error {
cc.mu.Lock()
+ cc.closed = true
+ for _, cs := range cc.streams {
+ cs.abortStreamLocked(err)
+ }
defer cc.cond.Broadcast()
defer cc.mu.Unlock()
- for id, cs := range cc.streams {
- select {
- case cs.resc <- resAndError{err: err}:
- default:
- }
- cs.bufPipe.CloseWithError(err)
- delete(cc.streams, id)
- }
- cc.closed = true
return cc.tconn.Close()
}
@@ -914,6 +1017,9 @@ func (cc *ClientConn) Close() error {
// closes the client connection immediately. In-flight requests are interrupted.
func (cc *ClientConn) closeForLostPing() error {
err := errors.New("http2: client connection lost")
+ if f := cc.t.CountError; f != nil {
+ f("conn_close_lost_ping")
+ }
return cc.closeForError(err)
}
@@ -978,41 +1084,142 @@ func actualContentLength(req *http.Request) int64 {
return -1
}
+func (cc *ClientConn) decrStreamReservations() {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cc.decrStreamReservationsLocked()
+}
+
+func (cc *ClientConn) decrStreamReservationsLocked() {
+ if cc.streamsReserved > 0 {
+ cc.streamsReserved--
+ }
+}
+
func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
- resp, _, err := cc.roundTrip(req)
- return resp, err
+ ctx := req.Context()
+ cs := &clientStream{
+ cc: cc,
+ ctx: ctx,
+ reqCancel: req.Cancel,
+ isHead: req.Method == "HEAD",
+ reqBody: req.Body,
+ reqBodyContentLength: actualContentLength(req),
+ trace: httptrace.ContextClientTrace(ctx),
+ peerClosed: make(chan struct{}),
+ abort: make(chan struct{}),
+ respHeaderRecv: make(chan struct{}),
+ donec: make(chan struct{}),
+ }
+ go cs.doRequest(req)
+
+ waitDone := func() error {
+ select {
+ case <-cs.donec:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cs.reqCancel:
+ return errRequestCanceled
+ }
+ }
+
+ for {
+ select {
+ case <-cs.respHeaderRecv:
+ res := cs.res
+ if res.StatusCode > 299 {
+ // On error or status code 3xx, 4xx, 5xx, etc., abort any
+ // ongoing write, assuming that the server doesn't care
+ // about our request body. If the server replied with 1xx or
+ // 2xx, however, then assume the server DOES potentially
+ // want our body (e.g. full-duplex streaming:
+ // golang.org/issue/13444). If it turns out the server
+ // doesn't, they'll RST_STREAM us soon enough. This is a
+ // heuristic to avoid adding knobs to Transport. Hopefully
+ // we can keep it.
+ cs.abortRequestBodyWrite()
+ }
+ res.Request = req
+ res.TLS = cc.tlsState
+ if res.Body == noBody && actualContentLength(req) == 0 {
+ // If there isn't a request or response body still being
+ // written, then wait for the stream to be closed before
+ // RoundTrip returns.
+ if err := waitDone(); err != nil {
+ return nil, err
+ }
+ }
+ return res, nil
+ case <-cs.abort:
+ waitDone()
+ return nil, cs.abortErr
+ case <-ctx.Done():
+ err := ctx.Err()
+ cs.abortStream(err)
+ return nil, err
+ case <-cs.reqCancel:
+ cs.abortStream(errRequestCanceled)
+ return nil, errRequestCanceled
+ }
+ }
+}
+
+// doRequest runs for the duration of the request lifetime.
+//
+// It sends the request and performs post-request cleanup (closing Request.Body, etc.).
+func (cs *clientStream) doRequest(req *http.Request) {
+ err := cs.writeRequest(req)
+ cs.cleanupWriteRequest(err)
}
-func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) {
+// writeRequest sends a request.
+//
+// It returns nil after the request is written, the response read,
+// and the request stream is half-closed by the peer.
+//
+// It returns non-nil if the request ends otherwise.
+// If the returned error is StreamError, the error Code may be used in resetting the stream.
+func (cs *clientStream) writeRequest(req *http.Request) (err error) {
+ cc := cs.cc
+ ctx := cs.ctx
+
if err := checkConnHeaders(req); err != nil {
- return nil, false, err
- }
- if cc.idleTimer != nil {
- cc.idleTimer.Stop()
+ return err
}
- trailers, err := commaSeparatedTrailers(req)
- if err != nil {
- return nil, false, err
+ // Acquire the new-request lock by writing to reqHeaderMu.
+ // This lock guards the critical section covering allocating a new stream ID
+ // (requires mu) and creating the stream (requires wmu).
+ if cc.reqHeaderMu == nil {
+ panic("RoundTrip on uninitialized ClientConn") // for tests
+ }
+ select {
+ case cc.reqHeaderMu <- struct{}{}:
+ case <-cs.reqCancel:
+ return errRequestCanceled
+ case <-ctx.Done():
+ return ctx.Err()
}
- hasTrailers := trailers != ""
cc.mu.Lock()
- if err := cc.awaitOpenSlotForRequest(req); err != nil {
+ if cc.idleTimer != nil {
+ cc.idleTimer.Stop()
+ }
+ cc.decrStreamReservationsLocked()
+ if err := cc.awaitOpenSlotForStreamLocked(cs); err != nil {
cc.mu.Unlock()
- return nil, false, err
+ <-cc.reqHeaderMu
+ return err
}
-
- body := req.Body
- contentLen := actualContentLength(req)
- hasBody := contentLen != 0
+ cc.addStreamLocked(cs) // assigns stream ID
+ cc.mu.Unlock()
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
- var requestedGzip bool
if !cc.t.disableCompression() &&
req.Header.Get("Accept-Encoding") == "" &&
req.Header.Get("Range") == "" &&
- req.Method != "HEAD" {
+ !cs.isHead {
// Request gzip only, not deflate. Deflate is ambiguous and
// not as universally supported anyway.
// See: https://zlib.net/zlib_faq.html#faq39
@@ -1025,183 +1232,223 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
// We don't request gzip if the request is for a range, since
// auto-decoding a portion of a gzipped document will just fail
// anyway. See https://golang.org/issue/8923
- requestedGzip = true
+ cs.requestedGzip = true
}
- // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
- // sent by writeRequestBody below, along with any Trailers,
- // again in form HEADERS{1}, CONTINUATION{0,})
- hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen)
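+ // For requests with an "Expect: 100-continue" header, the body write is
+ // deferred until the server's interim 100 response arrives on cs.on100
+ // or the timeout fires, whichever happens first (see below).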
+ continueTimeout := cc.t.expectContinueTimeout()
+ if continueTimeout != 0 {
+ if !httpguts.HeaderValuesContainsToken(req.Header["Expect"], "100-continue") {
+ continueTimeout = 0
+ } else {
+ cs.on100 = make(chan struct{}, 1)
+ }
+ }
+
+ // Past this point (where we send request headers), it is possible for
+ // RoundTrip to return successfully. Since the RoundTrip contract permits
+ // the caller to "mutate or reuse" the Request after closing the Response's Body,
+ // we must take care when referencing the Request from here on.
+ err = cs.encodeAndWriteHeaders(req)
+ <-cc.reqHeaderMu
if err != nil {
- cc.mu.Unlock()
- return nil, false, err
+ return err
}
- cs := cc.newStream()
- cs.req = req
- cs.trace = httptrace.ContextClientTrace(req.Context())
- cs.requestedGzip = requestedGzip
- bodyWriter := cc.t.getBodyWriterState(cs, body)
- cs.on100 = bodyWriter.on100
+ hasBody := cs.reqBodyContentLength != 0
+ if !hasBody {
+ cs.sentEndStream = true
+ } else {
+ if continueTimeout != 0 {
+ traceWait100Continue(cs.trace)
+ timer := time.NewTimer(continueTimeout)
+ select {
+ case <-timer.C:
+ err = nil
+ case <-cs.on100:
+ err = nil
+ case <-cs.abort:
+ err = cs.abortErr
+ case <-ctx.Done():
+ err = ctx.Err()
+ case <-cs.reqCancel:
+ err = errRequestCanceled
+ }
+ timer.Stop()
+ if err != nil {
+ traceWroteRequest(cs.trace, err)
+ return err
+ }
+ }
- defer func() {
- cc.wmu.Lock()
- werr := cc.werr
- cc.wmu.Unlock()
- if werr != nil {
- cc.Close()
+ if err = cs.writeRequestBody(req); err != nil {
+ if err != errStopReqBodyWrite {
+ traceWroteRequest(cs.trace, err)
+ return err
+ }
+ } else {
+ cs.sentEndStream = true
}
- }()
+ }
+
+ traceWroteRequest(cs.trace, err)
+
+ var respHeaderTimer <-chan time.Time
+ var respHeaderRecv chan struct{}
+ if d := cc.responseHeaderTimeout(); d != 0 {
+ timer := time.NewTimer(d)
+ defer timer.Stop()
+ respHeaderTimer = timer.C
+ respHeaderRecv = cs.respHeaderRecv
+ }
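+ // Once response headers arrive, the loop below sets respHeaderTimer to
+ // nil; a receive from a nil channel blocks forever, which disarms the
+ // timeout while we keep waiting for END_STREAM.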
+ // Wait until the peer half-closes its end of the stream,
+ // or until the request is aborted (via context, error, or otherwise),
+ // whichever comes first.
+ for {
+ select {
+ case <-cs.peerClosed:
+ return nil
+ case <-respHeaderTimer:
+ return errTimeout
+ case <-respHeaderRecv:
+ respHeaderTimer = nil // keep waiting for END_STREAM
+ case <-cs.abort:
+ return cs.abortErr
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cs.reqCancel:
+ return errRequestCanceled
+ }
+ }
+}
+
+func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error {
+ cc := cs.cc
+ ctx := cs.ctx
cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+
+ // If the request was canceled while waiting for cc.wmu, just quit.
+ select {
+ case <-cs.abort:
+ return cs.abortErr
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cs.reqCancel:
+ return errRequestCanceled
+ default:
+ }
+
+ // Encode headers.
+ //
+ // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
+ // sent by writeRequestBody below, along with any Trailers,
+ // again in form HEADERS{1}, CONTINUATION{0,})
+ trailers, err := commaSeparatedTrailers(req)
+ if err != nil {
+ return err
+ }
+ hasTrailers := trailers != ""
+ contentLen := actualContentLength(req)
+ hasBody := contentLen != 0
+ hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen)
+ if err != nil {
+ return err
+ }
+
+ // Write the request.
endStream := !hasBody && !hasTrailers
- werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
- cc.wmu.Unlock()
+ cs.sentHeaders = true
+ err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
traceWroteHeaders(cs.trace)
- cc.mu.Unlock()
+ return err
+}
- if werr != nil {
- if hasBody {
- req.Body.Close() // per RoundTripper contract
- bodyWriter.cancel()
- }
- cc.forgetStreamID(cs.ID)
- // Don't bother sending a RST_STREAM (our write already failed;
- // no need to keep writing)
- traceWroteRequest(cs.trace, werr)
- return nil, false, werr
- }
+// cleanupWriteRequest performs post-request tasks.
+//
+// If err (the result of writeRequest) is non-nil and the stream is not closed,
+// cleanupWriteRequest will send a reset to the peer.
+func (cs *clientStream) cleanupWriteRequest(err error) {
+ cc := cs.cc
- var respHeaderTimer <-chan time.Time
- if hasBody {
- bodyWriter.scheduleBodyWrite()
- } else {
- traceWroteRequest(cs.trace, nil)
- if d := cc.responseHeaderTimeout(); d != 0 {
- timer := time.NewTimer(d)
- defer timer.Stop()
- respHeaderTimer = timer.C
- }
+ if cs.ID == 0 {
+ // We were canceled before creating the stream, so return our reservation.
+ cc.decrStreamReservations()
}
- readLoopResCh := cs.resc
- bodyWritten := false
- ctx := req.Context()
+ // TODO: write h12Compare test showing whether
+ // Request.Body is closed by the Transport,
+ // and in multiple cases: server replies <=299 and >299
+ // while still writing request body
+ cc.mu.Lock()
+ bodyClosed := cs.reqBodyClosed
+ cs.reqBodyClosed = true
+ cc.mu.Unlock()
+ if !bodyClosed && cs.reqBody != nil {
+ cs.reqBody.Close()
+ }
- handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) {
- res := re.res
- if re.err != nil || res.StatusCode > 299 {
- // On error or status code 3xx, 4xx, 5xx, etc abort any
- // ongoing write, assuming that the server doesn't care
- // about our request body. If the server replied with 1xx or
- // 2xx, however, then assume the server DOES potentially
- // want our body (e.g. full-duplex streaming:
- // golang.org/issue/13444). If it turns out the server
- // doesn't, they'll RST_STREAM us soon enough. This is a
- // heuristic to avoid adding knobs to Transport. Hopefully
- // we can keep it.
- bodyWriter.cancel()
- cs.abortRequestBodyWrite(errStopReqBodyWrite)
- if hasBody && !bodyWritten {
- <-bodyWriter.resc
+ if err != nil && cs.sentEndStream {
+ // If the connection is closed immediately after the response is read,
+ // we may be aborted before finishing up here. If the stream was closed
+ // cleanly on both sides, there is no error.
+ select {
+ case <-cs.peerClosed:
+ err = nil
+ default:
+ }
+ }
+ if err != nil {
+ cs.abortStream(err) // possibly redundant, but harmless
+ if cs.sentHeaders {
+ if se, ok := err.(StreamError); ok {
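+ // A StreamError with Cause errFromPeer was produced by a RST_STREAM
+ // received from the server (see processResetStream); echoing a reset
+ // back to the peer would be redundant.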
+ if se.Cause != errFromPeer {
+ cc.writeStreamReset(cs.ID, se.Code, err)
+ }
+ } else {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
}
}
- if re.err != nil {
- cc.forgetStreamID(cs.ID)
- return nil, cs.getStartedWrite(), re.err
+ cs.bufPipe.CloseWithError(err) // no-op if already closed
+ } else {
+ if cs.sentHeaders && !cs.sentEndStream {
+ cc.writeStreamReset(cs.ID, ErrCodeNo, nil)
}
- res.Request = req
- res.TLS = cc.tlsState
- return res, false, nil
+ cs.bufPipe.CloseWithError(errRequestCanceled)
}
-
- handleError := func(err error) (*http.Response, bool, error) {
- if !hasBody || bodyWritten {
- cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
- } else {
- bodyWriter.cancel()
- cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
- <-bodyWriter.resc
- }
+ if cs.ID != 0 {
cc.forgetStreamID(cs.ID)
- return nil, cs.getStartedWrite(), err
}
- for {
- select {
- case re := <-readLoopResCh:
- return handleReadLoopResponse(re)
- case <-respHeaderTimer:
- return handleError(errTimeout)
- case <-ctx.Done():
- return handleError(ctx.Err())
- case <-req.Cancel:
- return handleError(errRequestCanceled)
- case <-cs.peerReset:
- // processResetStream already removed the
- // stream from the streams map; no need for
- // forgetStreamID.
- return nil, cs.getStartedWrite(), cs.resetErr
- case err := <-bodyWriter.resc:
- bodyWritten = true
- // Prefer the read loop's response, if available. Issue 16102.
- select {
- case re := <-readLoopResCh:
- return handleReadLoopResponse(re)
- default:
- }
- if err != nil {
- cc.forgetStreamID(cs.ID)
- return nil, cs.getStartedWrite(), err
- }
- if d := cc.responseHeaderTimeout(); d != 0 {
- timer := time.NewTimer(d)
- defer timer.Stop()
- respHeaderTimer = timer.C
- }
- }
+ cc.wmu.Lock()
+ werr := cc.werr
+ cc.wmu.Unlock()
+ if werr != nil {
+ cc.Close()
}
+
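+ // Close donec last so that RoundTrip's waitDone and
+ // transportResponseBody.Close observe a fully cleaned-up stream.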
+ close(cs.donec)
}
-// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams.
+// awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams.
// Must hold cc.mu.
-func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error {
- var waitingForConn chan struct{}
- var waitingForConnErr error // guarded by cc.mu
+func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
for {
cc.lastActive = time.Now()
if cc.closed || !cc.canTakeNewRequestLocked() {
- if waitingForConn != nil {
- close(waitingForConn)
- }
return errClientConnUnusable
}
cc.lastIdle = time.Time{}
- if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) {
- if waitingForConn != nil {
- close(waitingForConn)
- }
+ if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) {
return nil
}
- // Unfortunately, we cannot wait on a condition variable and channel at
- // the same time, so instead, we spin up a goroutine to check if the
- // request is canceled while we wait for a slot to open in the connection.
- if waitingForConn == nil {
- waitingForConn = make(chan struct{})
- go func() {
- if err := awaitRequestCancel(req, waitingForConn); err != nil {
- cc.mu.Lock()
- waitingForConnErr = err
- cc.cond.Broadcast()
- cc.mu.Unlock()
- }
- }()
- }
cc.pendingRequests++
cc.cond.Wait()
cc.pendingRequests--
- if waitingForConnErr != nil {
- return waitingForConnErr
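+ // cc.cond is broadcast by forgetStreamID when a slot frees up; after
+ // waking, re-check whether this stream was aborted while we waited.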
+ select {
+ case <-cs.abort:
+ return cs.abortErr
+ default:
}
}
}
@@ -1228,10 +1475,6 @@ func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize
cc.fr.WriteContinuation(streamID, endHeaders, chunk)
}
}
- // TODO(bradfitz): this Flush could potentially block (as
- // could the WriteHeaders call(s) above), which means they
- // wouldn't respond to Request.Cancel being readable. That's
- // rare, but this should probably be in a goroutine.
cc.bw.Flush()
return cc.werr
}
@@ -1258,7 +1501,7 @@ func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int {
if n > max {
n = max
}
- if cl := actualContentLength(cs.req); cl != -1 && cl+1 < n {
+ if cl := cs.reqBodyContentLength; cl != -1 && cl+1 < n {
// Add an extra byte past the declared content-length to
// give the caller's Request.Body io.Reader a chance to
// give us more bytes than they declared, so we can catch it
@@ -1273,31 +1516,13 @@ func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int {
var bufPool sync.Pool // of *[]byte
-func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
+func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
cc := cs.cc
+ body := cs.reqBody
sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
- defer func() {
- traceWroteRequest(cs.trace, err)
- // TODO: write h12Compare test showing whether
- // Request.Body is closed by the Transport,
- // and in multiple cases: server replies <=299 and >299
- // while still writing request body
- var cerr error
- cc.mu.Lock()
- if cs.stopReqBody == nil {
- cs.stopReqBody = errStopReqBodyWrite
- cerr = bodyCloser.Close()
- }
- cc.mu.Unlock()
- if err == nil {
- err = cerr
- }
- }()
-
- req := cs.req
hasTrailers := req.Trailer != nil
- remainLen := actualContentLength(req)
+ remainLen := cs.reqBodyContentLength
hasContentLen := remainLen != -1
cc.mu.Lock()
@@ -1335,29 +1560,29 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
}
if remainLen < 0 {
err = errReqBodyTooLong
- cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
return err
}
}
- if err == io.EOF {
- sawEOF = true
- err = nil
- } else if err != nil {
- cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
- return err
+ if err != nil {
+ cc.mu.Lock()
+ bodyClosed := cs.reqBodyClosed
+ cc.mu.Unlock()
+ switch {
+ case bodyClosed:
+ return errStopReqBodyWrite
+ case err == io.EOF:
+ sawEOF = true
+ err = nil
+ default:
+ return err
+ }
}
remain := buf[:n]
for len(remain) > 0 && err == nil {
var allowed int32
allowed, err = cs.awaitFlowControl(len(remain))
- switch {
- case err == errStopReqBodyWrite:
- return err
- case err == errStopReqBodyWriteAndCancel:
- cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
- return err
- case err != nil:
+ if err != nil {
return err
}
cc.wmu.Lock()
@@ -1388,21 +1613,27 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
return nil
}
+ // Since the RoundTrip contract permits the caller to "mutate or reuse"
+ // a request after the Response's Body is closed, verify that this hasn't
+ // happened before accessing the trailers.
+ cc.mu.Lock()
+ trailer := req.Trailer
+ err = cs.abortErr
+ cc.mu.Unlock()
+ if err != nil {
+ return err
+ }
+
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
var trls []byte
- if hasTrailers {
- cc.mu.Lock()
- trls, err = cc.encodeTrailers(req)
- cc.mu.Unlock()
+ if len(trailer) > 0 {
+ trls, err = cc.encodeTrailers(trailer)
if err != nil {
- cc.writeStreamReset(cs.ID, ErrCodeInternal, err)
- cc.forgetStreamID(cs.ID)
return err
}
}
- cc.wmu.Lock()
- defer cc.wmu.Unlock()
-
// Two ways to send END_STREAM: either with trailers, or
// with an empty DATA frame.
if len(trls) > 0 {
@@ -1422,17 +1653,24 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
// if the stream is dead.
func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
cc := cs.cc
+ ctx := cs.ctx
cc.mu.Lock()
defer cc.mu.Unlock()
for {
if cc.closed {
return 0, errClientConnClosed
}
- if cs.stopReqBody != nil {
- return 0, cs.stopReqBody
+ if cs.reqBodyClosed {
+ return 0, errStopReqBodyWrite
}
- if err := cs.checkResetOrDone(); err != nil {
- return 0, err
+ select {
+ case <-cs.abort:
+ return 0, cs.abortErr
+ case <-ctx.Done():
+ return 0, ctx.Err()
+ case <-cs.reqCancel:
+ return 0, errRequestCanceled
+ default:
}
if a := cs.flow.available(); a > 0 {
take := a
@@ -1450,9 +1688,14 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
}
}
-// requires cc.mu be held.
+var errNilRequestURL = errors.New("http2: Request.URL is nil")
+
+// requires cc.wmu be held.
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
cc.hbuf.Reset()
+ if req.URL == nil {
+ return nil, errNilRequestURL
+ }
host := req.Host
if host == "" {
@@ -1638,12 +1881,12 @@ func shouldSendReqContentLength(method string, contentLength int64) bool {
}
}
-// requires cc.mu be held.
-func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) {
+// requires cc.wmu be held.
+func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
cc.hbuf.Reset()
hlSize := uint64(0)
- for k, vv := range req.Trailer {
+ for k, vv := range trailer {
for _, v := range vv {
hf := hpack.HeaderField{Name: k, Value: v}
hlSize += uint64(hf.Size())
@@ -1653,7 +1896,7 @@ func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) {
return nil, errRequestHeaderListSize
}
- for k, vv := range req.Trailer {
+ for k, vv := range trailer {
lowKey, ascii := asciiToLower(k)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
@@ -1683,51 +1926,51 @@ type resAndError struct {
}
// requires cc.mu be held.
-func (cc *ClientConn) newStream() *clientStream {
- cs := &clientStream{
- cc: cc,
- ID: cc.nextStreamID,
- resc: make(chan resAndError, 1),
- peerReset: make(chan struct{}),
- done: make(chan struct{}),
- }
+func (cc *ClientConn) addStreamLocked(cs *clientStream) {
cs.flow.add(int32(cc.initialWindowSize))
cs.flow.setConnFlow(&cc.flow)
cs.inflow.add(transportDefaultStreamFlow)
cs.inflow.setConnFlow(&cc.inflow)
+ cs.ID = cc.nextStreamID
cc.nextStreamID += 2
cc.streams[cs.ID] = cs
- return cs
+ if cs.ID == 0 {
+ panic("assigned stream ID 0")
+ }
}
func (cc *ClientConn) forgetStreamID(id uint32) {
- cc.streamByID(id, true)
-}
-
-func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
cc.mu.Lock()
- defer cc.mu.Unlock()
- cs := cc.streams[id]
- if andRemove && cs != nil && !cc.closed {
- cc.lastActive = time.Now()
- delete(cc.streams, id)
- if len(cc.streams) == 0 && cc.idleTimer != nil {
- cc.idleTimer.Reset(cc.idleTimeout)
- cc.lastIdle = time.Now()
- }
- close(cs.done)
- // Wake up checkResetOrDone via clientStream.awaitFlowControl and
- // wake up RoundTrip if there is a pending request.
- cc.cond.Broadcast()
+ slen := len(cc.streams)
+ delete(cc.streams, id)
+ if len(cc.streams) != slen-1 {
+ panic("forgetting unknown stream id")
+ }
+ cc.lastActive = time.Now()
+ if len(cc.streams) == 0 && cc.idleTimer != nil {
+ cc.idleTimer.Reset(cc.idleTimeout)
+ cc.lastIdle = time.Now()
+ }
+ // Wake up writeRequestBody via clientStream.awaitFlowControl and
+ // wake up RoundTrip if there is a pending request.
+ cc.cond.Broadcast()
+
+ closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives()
+ if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 {
+ if VerboseLogs {
+ cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, cc.nextStreamID-2)
+ }
+ cc.closed = true
+ defer cc.tconn.Close()
}
- return cs
+
+ cc.mu.Unlock()
}
// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
type clientConnReadLoop struct {
- _ incomparable
- cc *ClientConn
- closeWhenIdle bool
+ _ incomparable
+ cc *ClientConn
}
// readLoop runs in its own goroutine and reads and dispatches frames.
@@ -1787,23 +2030,49 @@ func (rl *clientConnReadLoop) cleanup() {
} else if err == io.EOF {
err = io.ErrUnexpectedEOF
}
+ cc.closed = true
for _, cs := range cc.streams {
- cs.bufPipe.CloseWithError(err) // no-op if already closed
select {
- case cs.resc <- resAndError{err: err}:
+ case <-cs.peerClosed:
+ // The server closed the stream before closing the conn,
+ // so no need to interrupt it.
default:
+ cs.abortStreamLocked(err)
}
- close(cs.done)
}
- cc.closed = true
cc.cond.Broadcast()
cc.mu.Unlock()
}
+// countReadFrameError calls Transport.CountError with a string
+// representing err.
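+//
+// For example, a ConnectionError carrying ErrCodeProtocol is counted as
+// "read_frame_conn_error_PROTOCOL_ERROR".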
+func (cc *ClientConn) countReadFrameError(err error) {
+ f := cc.t.CountError
+ if f == nil || err == nil {
+ return
+ }
+ if ce, ok := err.(ConnectionError); ok {
+ errCode := ErrCode(ce)
+ f(fmt.Sprintf("read_frame_conn_error_%s", errCode.stringToken()))
+ return
+ }
+ if errors.Is(err, io.EOF) {
+ f("read_frame_eof")
+ return
+ }
+ if errors.Is(err, io.ErrUnexpectedEOF) {
+ f("read_frame_unexpected_eof")
+ return
+ }
+ if errors.Is(err, ErrFrameTooLarge) {
+ f("read_frame_too_large")
+ return
+ }
+ f("read_frame_other")
+}
+
func (rl *clientConnReadLoop) run() error {
cc := rl.cc
- rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
- gotReply := false // ever saw a HEADERS reply
gotSettings := false
readIdleTimeout := cc.t.ReadIdleTimeout
var t *time.Timer
@@ -1820,9 +2089,7 @@ func (rl *clientConnReadLoop) run() error {
cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
}
if se, ok := err.(StreamError); ok {
- if cs := cc.streamByID(se.StreamID, false); cs != nil {
- cs.cc.writeStreamReset(cs.ID, se.Code, err)
- cs.cc.forgetStreamID(cs.ID)
+ if cs := rl.streamByID(se.StreamID); cs != nil {
if se.Cause == nil {
se.Cause = cc.fr.errDetail
}
@@ -1830,6 +2097,7 @@ func (rl *clientConnReadLoop) run() error {
}
continue
} else if err != nil {
+ cc.countReadFrameError(err)
return err
}
if VerboseLogs {
@@ -1842,22 +2110,16 @@ func (rl *clientConnReadLoop) run() error {
}
gotSettings = true
}
- maybeIdle := false // whether frame might transition us to idle
switch f := f.(type) {
case *MetaHeadersFrame:
err = rl.processHeaders(f)
- maybeIdle = true
- gotReply = true
case *DataFrame:
err = rl.processData(f)
- maybeIdle = true
case *GoAwayFrame:
err = rl.processGoAway(f)
- maybeIdle = true
case *RSTStreamFrame:
err = rl.processResetStream(f)
- maybeIdle = true
case *SettingsFrame:
err = rl.processSettings(f)
case *PushPromiseFrame:
@@ -1875,38 +2137,24 @@ func (rl *clientConnReadLoop) run() error {
}
return err
}
- if rl.closeWhenIdle && gotReply && maybeIdle {
- cc.closeIfIdle()
- }
}
}
func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
- cc := rl.cc
- cs := cc.streamByID(f.StreamID, false)
+ cs := rl.streamByID(f.StreamID)
if cs == nil {
// We'd get here if we canceled a request while the
// server had its response still in flight. So if this
// was just something we canceled, ignore it.
return nil
}
- if f.StreamEnded() {
- // Issue 20521: If the stream has ended, streamByID() causes
- // clientStream.done to be closed, which causes the request's bodyWriter
- // to be closed with an errStreamClosed, which may be received by
- // clientConn.RoundTrip before the result of processing these headers.
- // Deferring stream closure allows the header processing to occur first.
- // clientConn.RoundTrip may still receive the bodyWriter error first, but
- // the fix for issue 16102 prioritises any response.
- //
- // Issue 22413: If there is no request body, we should close the
- // stream before writing to cs.resc so that the stream is closed
- // immediately once RoundTrip returns.
- if cs.req.Body != nil {
- defer cc.forgetStreamID(f.StreamID)
- } else {
- cc.forgetStreamID(f.StreamID)
- }
+ if cs.readClosed {
+ rl.endStreamError(cs, StreamError{
+ StreamID: f.StreamID,
+ Code: ErrCodeProtocol,
+ Cause: errors.New("protocol error: headers after END_STREAM"),
+ })
+ return nil
}
if !cs.firstByte {
if cs.trace != nil {
@@ -1930,9 +2178,11 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
return err
}
// Any other error type is a stream error.
- cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err)
- cc.forgetStreamID(cs.ID)
- cs.resc <- resAndError{err: err}
+ rl.endStreamError(cs, StreamError{
+ StreamID: f.StreamID,
+ Code: ErrCodeProtocol,
+ Cause: err,
+ })
return nil // return nil from process* funcs to keep conn alive
}
if res == nil {
@@ -1940,7 +2190,11 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
return nil
}
cs.resTrailer = &res.Trailer
- cs.resc <- resAndError{res: res}
+ cs.res = res
+ close(cs.respHeaderRecv)
+ if f.StreamEnded() {
+ rl.endStream(cs)
+ }
return nil
}
@@ -2002,6 +2256,9 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
}
if statusCode >= 100 && statusCode <= 199 {
+ if f.StreamEnded() {
+ return nil, errors.New("1xx informational response with END_STREAM flag")
+ }
cs.num1xx++
const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http
if cs.num1xx > max1xxResponses {
@@ -2014,40 +2271,47 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
}
if statusCode == 100 {
traceGot100Continue(cs.trace)
- if cs.on100 != nil {
- cs.on100() // forces any write delay timer to fire
+ select {
+ case cs.on100 <- struct{}{}:
+ default:
}
}
cs.pastHeaders = false // do it all again
return nil, nil
}
- streamEnded := f.StreamEnded()
- isHead := cs.req.Method == "HEAD"
- if !streamEnded || isHead {
- res.ContentLength = -1
- if clens := res.Header["Content-Length"]; len(clens) == 1 {
- if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil {
- res.ContentLength = int64(cl)
- } else {
- // TODO: care? unlike http/1, it won't mess up our framing, so it's
- // more safe smuggling-wise to ignore.
- }
- } else if len(clens) > 1 {
+ res.ContentLength = -1
+ if clens := res.Header["Content-Length"]; len(clens) == 1 {
+ if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil {
+ res.ContentLength = int64(cl)
+ } else {
// TODO: care? unlike http/1, it won't mess up our framing, so it's
// more safe smuggling-wise to ignore.
}
+ } else if len(clens) > 1 {
+ // TODO: care? unlike http/1, it won't mess up our framing, so it's
+ // more safe smuggling-wise to ignore.
+ } else if f.StreamEnded() && !cs.isHead {
+ res.ContentLength = 0
}
- if streamEnded || isHead {
+ if cs.isHead {
res.Body = noBody
return res, nil
}
- cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}}
+ if f.StreamEnded() {
+ if res.ContentLength > 0 {
+ res.Body = missingBody{}
+ } else {
+ res.Body = noBody
+ }
+ return res, nil
+ }
+
+ cs.bufPipe.setBuffer(&dataBuffer{expected: res.ContentLength})
cs.bytesRemain = res.ContentLength
res.Body = transportResponseBody{cs}
- go cs.awaitRequestCancel(cs.req)
if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" {
res.Header.Del("Content-Encoding")
@@ -2088,8 +2352,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr
}
// transportResponseBody is the concrete type of Transport.RoundTrip's
-// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body.
-// On Close it sends RST_STREAM if EOF wasn't already seen.
+// Response.Body. It is an io.ReadCloser.
type transportResponseBody struct {
cs *clientStream
}
@@ -2107,7 +2370,7 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) {
n = int(cs.bytesRemain)
if err == nil {
err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
- cc.writeStreamReset(cs.ID, ErrCodeProtocol, err)
+ cs.abortStream(err)
}
cs.readErr = err
return int(cs.bytesRemain), err
@@ -2125,8 +2388,6 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) {
}
cc.mu.Lock()
- defer cc.mu.Unlock()
-
var connAdd, streamAdd int32
// Check the conn-level first, before the stream-level.
if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
@@ -2143,6 +2404,8 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) {
cs.inflow.add(streamAdd)
}
}
+ cc.mu.Unlock()
+
if connAdd != 0 || streamAdd != 0 {
cc.wmu.Lock()
defer cc.wmu.Unlock()
@@ -2163,34 +2426,42 @@ func (b transportResponseBody) Close() error {
cs := b.cs
cc := cs.cc
- serverSentStreamEnd := cs.bufPipe.Err() == io.EOF
unread := cs.bufPipe.Len()
-
- if unread > 0 || !serverSentStreamEnd {
+ if unread > 0 {
cc.mu.Lock()
- cc.wmu.Lock()
- if !serverSentStreamEnd {
- cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
- cs.didReset = true
- }
// Return connection-level flow control.
if unread > 0 {
cc.inflow.add(int32(unread))
+ }
+ cc.mu.Unlock()
+
+ // TODO(dneil): Acquiring this mutex can block indefinitely.
+ // Move flow control return to a goroutine?
+ cc.wmu.Lock()
+ // Return connection-level flow control.
+ if unread > 0 {
cc.fr.WriteWindowUpdate(0, uint32(unread))
}
cc.bw.Flush()
cc.wmu.Unlock()
- cc.mu.Unlock()
}
cs.bufPipe.BreakWithError(errClosedResponseBody)
- cc.forgetStreamID(cs.ID)
+ cs.abortStream(errClosedResponseBody)
+
+ select {
+ case <-cs.donec:
+ case <-cs.ctx.Done():
+ return cs.ctx.Err()
+ case <-cs.reqCancel:
+ return errRequestCanceled
+ }
return nil
}
func (rl *clientConnReadLoop) processData(f *DataFrame) error {
cc := rl.cc
- cs := cc.streamByID(f.StreamID, f.StreamEnded())
+ cs := rl.streamByID(f.StreamID)
data := f.Data()
if cs == nil {
cc.mu.Lock()
@@ -2219,6 +2490,14 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
}
return nil
}
+ if cs.readClosed {
+ cc.logf("protocol error: received DATA after END_STREAM")
+ rl.endStreamError(cs, StreamError{
+ StreamID: f.StreamID,
+ Code: ErrCodeProtocol,
+ })
+ return nil
+ }
if !cs.firstByte {
cc.logf("protocol error: received DATA before a HEADERS frame")
rl.endStreamError(cs, StreamError{
@@ -2228,7 +2507,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
return nil
}
if f.Length > 0 {
- if cs.req.Method == "HEAD" && len(data) > 0 {
+ if cs.isHead && len(data) > 0 {
cc.logf("protocol error: received DATA on a HEAD request")
rl.endStreamError(cs, StreamError{
StreamID: f.StreamID,
@@ -2250,30 +2529,39 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
if pad := int(f.Length) - len(data); pad > 0 {
refund += pad
}
- // Return len(data) now if the stream is already closed,
- // since data will never be read.
- didReset := cs.didReset
- if didReset {
- refund += len(data)
+
+ didReset := false
+ var err error
+ if len(data) > 0 {
+ if _, err = cs.bufPipe.Write(data); err != nil {
+ // Return len(data) now if the stream is already closed,
+ // since data will never be read.
+ didReset = true
+ refund += len(data)
+ }
}
+
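+ // Refund the windows under cc.mu first; the matching WINDOW_UPDATE
+ // frames are then written under cc.wmu alone, so no frame I/O happens
+ // while both locks are held.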
if refund > 0 {
cc.inflow.add(int32(refund))
+ if !didReset {
+ cs.inflow.add(int32(refund))
+ }
+ }
+ cc.mu.Unlock()
+
+ if refund > 0 {
cc.wmu.Lock()
cc.fr.WriteWindowUpdate(0, uint32(refund))
if !didReset {
- cs.inflow.add(int32(refund))
cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
}
cc.bw.Flush()
cc.wmu.Unlock()
}
- cc.mu.Unlock()
- if len(data) > 0 && !didReset {
- if _, err := cs.bufPipe.Write(data); err != nil {
- rl.endStreamError(cs, err)
- return err
- }
+ if err != nil {
+ rl.endStreamError(cs, err)
+ return nil
}
}
@@ -2286,24 +2574,32 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
func (rl *clientConnReadLoop) endStream(cs *clientStream) {
// TODO: check that any declared content-length matches, like
// server.go's (*stream).endStream method.
- rl.endStreamError(cs, nil)
+ if !cs.readClosed {
+ cs.readClosed = true
+ // Close cs.bufPipe and cs.peerClosed with cc.mu held to avoid a
+ // race condition: The caller can read io.EOF from Response.Body
+ // and close the body before we close cs.peerClosed, causing
+ // cleanupWriteRequest to send a RST_STREAM.
+ rl.cc.mu.Lock()
+ defer rl.cc.mu.Unlock()
+ cs.bufPipe.closeWithErrorAndCode(io.EOF, cs.copyTrailers)
+ close(cs.peerClosed)
+ }
}
func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
- var code func()
- if err == nil {
- err = io.EOF
- code = cs.copyTrailers
- }
- if isConnectionCloseRequest(cs.req) {
- rl.closeWhenIdle = true
- }
- cs.bufPipe.closeWithErrorAndCode(err, code)
+ cs.readAborted = true
+ cs.abortStream(err)
+}
- select {
- case cs.resc <- resAndError{err: err}:
- default:
+func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream {
+ rl.cc.mu.Lock()
+ defer rl.cc.mu.Unlock()
+ cs := rl.cc.streams[id]
+ if cs != nil && !cs.readAborted {
+ return cs
}
+ return nil
}
func (cs *clientStream) copyTrailers() {
@@ -2322,6 +2618,10 @@ func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
if f.ErrCode != 0 {
// TODO: deal with GOAWAY more. particularly the error code
cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
+ if fn := cc.t.CountError; fn != nil {
+ fn("recv_goaway_" + f.ErrCode.stringToken())
+ }
}
cc.setGoAway(f)
return nil
@@ -2329,6 +2629,23 @@ func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
cc := rl.cc
+ // Locking both mu and wmu here allows frame encoding to read settings with only wmu held.
+ // Acquiring wmu when f.IsAck() is unnecessary, but convenient and mostly harmless.
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+
+ if err := rl.processSettingsNoWrite(f); err != nil {
+ return err
+ }
+ if !f.IsAck() {
+ cc.fr.WriteSettingsAck()
+ cc.bw.Flush()
+ }
+ return nil
+}
+
+func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
+ cc := rl.cc
cc.mu.Lock()
defer cc.mu.Unlock()
@@ -2340,12 +2657,14 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
return ConnectionError(ErrCodeProtocol)
}
+ var seenMaxConcurrentStreams bool
err := f.ForeachSetting(func(s Setting) error {
switch s.ID {
case SettingMaxFrameSize:
cc.maxFrameSize = s.Val
case SettingMaxConcurrentStreams:
cc.maxConcurrentStreams = s.Val
+ seenMaxConcurrentStreams = true
case SettingMaxHeaderListSize:
cc.peerMaxHeaderListSize = uint64(s.Val)
case SettingInitialWindowSize:
@@ -2377,17 +2696,23 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
return err
}
- cc.wmu.Lock()
- defer cc.wmu.Unlock()
+ if !cc.seenSettings {
+ if !seenMaxConcurrentStreams {
+ // This was the server's initial SETTINGS frame, and it
+ // didn't contain a MAX_CONCURRENT_STREAMS field, so
+ // increase the number of concurrent streams this
+ // connection can establish to our default.
+ cc.maxConcurrentStreams = defaultMaxConcurrentStreams
+ }
+ cc.seenSettings = true
+ }
- cc.fr.WriteSettingsAck()
- cc.bw.Flush()
- return cc.werr
+ return nil
}
func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
cc := rl.cc
- cs := cc.streamByID(f.StreamID, false)
+ cs := rl.streamByID(f.StreamID)
if f.StreamID != 0 && cs == nil {
return nil
}
@@ -2407,24 +2732,22 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
}
func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
- cs := rl.cc.streamByID(f.StreamID, true)
+ cs := rl.streamByID(f.StreamID)
if cs == nil {
- // TODO: return error if server tries to RST_STEAM an idle stream
+ // TODO: return error if server tries to RST_STREAM an idle stream
return nil
}
- select {
- case <-cs.peerReset:
- // Already reset.
- // This is the only goroutine
- // which closes this, so there
- // isn't a race.
- default:
- err := streamError(cs.ID, f.ErrCode)
- cs.resetErr = err
- close(cs.peerReset)
- cs.bufPipe.CloseWithError(err)
- cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
+ serr := streamError(cs.ID, f.ErrCode)
+ serr.Cause = errFromPeer
+ if f.ErrCode == ErrCodeProtocol {
+ rl.cc.SetDoNotReuse()
}
+ if fn := cs.cc.t.CountError; fn != nil {
+ fn("recv_rststream_" + f.ErrCode.stringToken())
+ }
+ cs.abortStream(serr)
+
+ cs.bufPipe.CloseWithError(serr)
return nil
}
@@ -2446,19 +2769,24 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
}
cc.mu.Unlock()
}
- cc.wmu.Lock()
- if err := cc.fr.WritePing(false, p); err != nil {
- cc.wmu.Unlock()
- return err
- }
- if err := cc.bw.Flush(); err != nil {
- cc.wmu.Unlock()
- return err
- }
- cc.wmu.Unlock()
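+ // Write the PING from a goroutine so that a blocked write lock (for
+ // example, a stalled peer) cannot keep Ping from honoring ctx
+ // cancellation or connection shutdown below.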
+ errc := make(chan error, 1)
+ go func() {
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if err := cc.fr.WritePing(false, p); err != nil {
+ errc <- err
+ return
+ }
+ if err := cc.bw.Flush(); err != nil {
+ errc <- err
+ return
+ }
+ }()
select {
case <-c:
return nil
+ case err := <-errc:
+ return err
case <-ctx.Done():
return ctx.Err()
case <-cc.readerDone:
@@ -2535,6 +2863,11 @@ func (t *Transport) logf(format string, args ...interface{}) {
var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
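+// missingBody is the Response.Body used when a response declares a
+// nonzero Content-Length but arrives with END_STREAM set (see
+// handleResponse); reads report io.ErrUnexpectedEOF.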
+type missingBody struct{}
+
+func (missingBody) Close() error { return nil }
+func (missingBody) Read([]byte) (int, error) { return 0, io.ErrUnexpectedEOF }
+
func strSliceContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
@@ -2580,87 +2913,6 @@ type errorReader struct{ err error }
func (r errorReader) Read(p []byte) (int, error) { return 0, r.err }
-// bodyWriterState encapsulates various state around the Transport's writing
-// of the request body, particularly regarding doing delayed writes of the body
-// when the request contains "Expect: 100-continue".
-type bodyWriterState struct {
- cs *clientStream
- timer *time.Timer // if non-nil, we're doing a delayed write
- fnonce *sync.Once // to call fn with
- fn func() // the code to run in the goroutine, writing the body
- resc chan error // result of fn's execution
- delay time.Duration // how long we should delay a delayed write for
-}
-
-func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) {
- s.cs = cs
- if body == nil {
- return
- }
- resc := make(chan error, 1)
- s.resc = resc
- s.fn = func() {
- cs.cc.mu.Lock()
- cs.startedWrite = true
- cs.cc.mu.Unlock()
- resc <- cs.writeRequestBody(body, cs.req.Body)
- }
- s.delay = t.expectContinueTimeout()
- if s.delay == 0 ||
- !httpguts.HeaderValuesContainsToken(
- cs.req.Header["Expect"],
- "100-continue") {
- return
- }
- s.fnonce = new(sync.Once)
-
- // Arm the timer with a very large duration, which we'll
- // intentionally lower later. It has to be large now because
- // we need a handle to it before writing the headers, but the
- // s.delay value is defined to not start until after the
- // request headers were written.
- const hugeDuration = 365 * 24 * time.Hour
- s.timer = time.AfterFunc(hugeDuration, func() {
- s.fnonce.Do(s.fn)
- })
- return
-}
-
-func (s bodyWriterState) cancel() {
- if s.timer != nil {
- if s.timer.Stop() {
- s.resc <- nil
- }
- }
-}
-
-func (s bodyWriterState) on100() {
- if s.timer == nil {
- // If we didn't do a delayed write, ignore the server's
- // bogus 100 continue response.
- return
- }
- s.timer.Stop()
- go func() { s.fnonce.Do(s.fn) }()
-}
-
-// scheduleBodyWrite starts writing the body, either immediately (in
-// the common case) or after the delay timeout. It should not be
-// called until after the headers have been written.
-func (s bodyWriterState) scheduleBodyWrite() {
- if s.timer == nil {
- // We're not doing a delayed write (see
- // getBodyWriterState), so just start the writing
- // goroutine immediately.
- go s.fn()
- return
- }
- traceWait100Continue(s.cs.trace)
- if s.timer.Stop() {
- s.timer.Reset(s.delay)
- }
-}
-
// isConnectionCloseRequest reports whether req should use its own
// connection for a single request and then close the connection.
func isConnectionCloseRequest(req *http.Request) bool {
diff --git a/vendor/golang.org/x/net/idna/go118.go b/vendor/golang.org/x/net/idna/go118.go
new file mode 100644
index 000000000..c5c4338db
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/go118.go
@@ -0,0 +1,14 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package idna
+
+// Transitional processing is disabled by default in Go 1.18.
+// https://golang.org/issue/47510
+const transitionalLookup = false
diff --git a/vendor/golang.org/x/net/idna/idna10.0.0.go b/vendor/golang.org/x/net/idna/idna10.0.0.go
index 5208ba6cb..64ccf85fe 100644
--- a/vendor/golang.org/x/net/idna/idna10.0.0.go
+++ b/vendor/golang.org/x/net/idna/idna10.0.0.go
@@ -59,10 +59,10 @@ type Option func(*options)
// Transitional sets a Profile to use the Transitional mapping as defined in UTS
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
-// compatibility. It is used by most browsers when resolving domain names. This
+// compatibility. It is used by some browsers when resolving domain names. This
// option is only meaningful if combined with MapForLookup.
func Transitional(transitional bool) Option {
- return func(o *options) { o.transitional = true }
+ return func(o *options) { o.transitional = transitional }
}
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
@@ -284,7 +284,7 @@ var (
punycode = &Profile{}
lookup = &Profile{options{
- transitional: true,
+ transitional: transitionalLookup,
useSTD3Rules: true,
checkHyphens: true,
checkJoiners: true,
diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go
index 55f718f12..aae6aac87 100644
--- a/vendor/golang.org/x/net/idna/idna9.0.0.go
+++ b/vendor/golang.org/x/net/idna/idna9.0.0.go
@@ -58,10 +58,10 @@ type Option func(*options)
// Transitional sets a Profile to use the Transitional mapping as defined in UTS
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
-// compatibility. It is used by most browsers when resolving domain names. This
+// compatibility. It is used by some browsers when resolving domain names. This
// option is only meaningful if combined with MapForLookup.
func Transitional(transitional bool) Option {
- return func(o *options) { o.transitional = true }
+ return func(o *options) { o.transitional = transitional }
}
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
diff --git a/vendor/golang.org/x/net/idna/pre_go118.go b/vendor/golang.org/x/net/idna/pre_go118.go
new file mode 100644
index 000000000..3aaccab1c
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/pre_go118.go
@@ -0,0 +1,12 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package idna
+
+const transitionalLookup = true
diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go
index 02c7d59af..e8e3ac11a 100644
--- a/vendor/golang.org/x/net/idna/punycode.go
+++ b/vendor/golang.org/x/net/idna/punycode.go
@@ -49,6 +49,7 @@ func decode(encoded string) (string, error) {
}
}
i, n, bias := int32(0), initialN, initialBias
+ overflow := false
for pos < len(encoded) {
oldI, w := i, int32(1)
for k := base; ; k += base {
@@ -60,29 +61,32 @@ func decode(encoded string) (string, error) {
return "", punyError(encoded)
}
pos++
- i += digit * w
- if i < 0 {
+ i, overflow = madd(i, digit, w)
+ if overflow {
return "", punyError(encoded)
}
t := k - bias
- if t < tmin {
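+ // Equivalent to the old t-based clamp (t == k - bias, tmin == 1), but
+ // phrased in terms of k so it does not depend on the subtraction's
+ // result.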
+ if k <= bias {
t = tmin
- } else if t > tmax {
+ } else if k >= bias+tmax {
t = tmax
}
if digit < t {
break
}
- w *= base - t
- if w >= math.MaxInt32/base {
+ w, overflow = madd(0, w, base-t)
+ if overflow {
return "", punyError(encoded)
}
}
+ if len(output) >= 1024 {
+ return "", punyError(encoded)
+ }
x := int32(len(output) + 1)
bias = adapt(i-oldI, x, oldI == 0)
n += i / x
i %= x
- if n > utf8.MaxRune || len(output) >= 1024 {
+ if n < 0 || n > utf8.MaxRune {
return "", punyError(encoded)
}
output = append(output, 0)
@@ -115,6 +119,7 @@ func encode(prefix, s string) (string, error) {
if b > 0 {
output = append(output, '-')
}
+ overflow := false
for remaining != 0 {
m := int32(0x7fffffff)
for _, r := range s {
@@ -122,8 +127,8 @@ func encode(prefix, s string) (string, error) {
m = r
}
}
- delta += (m - n) * (h + 1)
- if delta < 0 {
+ delta, overflow = madd(delta, m-n, h+1)
+ if overflow {
return "", punyError(s)
}
n = m
@@ -141,9 +146,9 @@ func encode(prefix, s string) (string, error) {
q := delta
for k := base; ; k += base {
t := k - bias
- if t < tmin {
+ if k <= bias {
t = tmin
- } else if t > tmax {
+ } else if k >= bias+tmax {
t = tmax
}
if q < t {
@@ -164,6 +169,15 @@ func encode(prefix, s string) (string, error) {
return string(output), nil
}
+// madd computes a + (b * c), detecting overflow.
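+// For example, madd(0, 1<<16, 1<<16) computes p = 1<<32, which exceeds
+// math.MaxInt32 and reports overflow.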
+func madd(a, b, c int32) (next int32, overflow bool) {
+ p := int64(b) * int64(c)
+ if p > math.MaxInt32-int64(a) {
+ return 0, true
+ }
+ return a + int32(p), false
+}
+
func decodeDigit(x byte) (digit int32, ok bool) {
switch {
case '0' <= x && x <= '9':