author     Daniel J Walsh <dwalsh@redhat.com>    2021-10-15 13:14:39 -0400
committer  Daniel J Walsh <dwalsh@redhat.com>    2021-10-18 14:44:10 -0400
commit     087f8fc73bec664a30dcf0757cd3cb44ea150582 (patch)
tree       b2a04eb4de47d044c6644dcc2da366935a0f9214 /vendor/golang.org/x/net
parent     e0ffc431fe7f016124fdcb36819698a90fe448a9 (diff)
Allow API to specify size and inode quota
Fixes: https://github.com/containers/podman/issues/11016

[NO NEW TESTS NEEDED] We have no easy way to test this in CI/CD systems;
it requires quota to be set up on the directories to work.

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
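The quota plumbing itself is not part of the vendored x/net changes shown below; for context, here is a minimal sketch of one plausible way a client could pass such quota options to the Podman service over its unix socket. The API version prefix and the "size"/"inode" option keys are assumptions for illustration and are not taken from this diff.

package main

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "net"
    "net/http"
)

func main() {
    // Dial the Podman service over its unix socket; the host in the URL is
    // ignored because DialContext is overridden.
    client := &http.Client{
        Transport: &http.Transport{
            DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
                return net.Dial("unix", "/run/podman/podman.sock")
            },
        },
    }

    // Hypothetical volume-create body carrying quota options
    // ("size"/"inode" keys are assumed, not confirmed by this diff).
    body, _ := json.Marshal(map[string]interface{}{
        "Name":    "quotavol",
        "Options": map[string]string{"size": "10G", "inode": "400000"},
    })

    resp, err := client.Post("http://d/v3.4.0/libpod/volumes/create",
        "application/json", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.Status)
}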
Diffstat (limited to 'vendor/golang.org/x/net')
-rw-r--r--  vendor/golang.org/x/net/http2/README        20
-rw-r--r--  vendor/golang.org/x/net/http2/ascii.go        4
-rw-r--r--  vendor/golang.org/x/net/http2/server.go      23
-rw-r--r--  vendor/golang.org/x/net/http2/transport.go  156
4 files changed, 93 insertions, 110 deletions
diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README
deleted file mode 100644
index 360d5aa37..000000000
--- a/vendor/golang.org/x/net/http2/README
+++ /dev/null
@@ -1,20 +0,0 @@
-This is a work-in-progress HTTP/2 implementation for Go.
-
-It will eventually live in the Go standard library and won't require
-any changes to your code to use. It will just be automatic.
-
-Status:
-
-* The server support is pretty good. A few things are missing
- but are being worked on.
-* The client work has just started but shares a lot of code
- is coming along much quicker.
-
-Docs are at https://godoc.org/golang.org/x/net/http2
-
-Demo test server at https://http2.golang.org/
-
-Help & bug reports welcome!
-
-Contributing: https://golang.org/doc/contribute.html
-Bugs: https://golang.org/issue/new?title=x/net/http2:+
diff --git a/vendor/golang.org/x/net/http2/ascii.go b/vendor/golang.org/x/net/http2/ascii.go
index 0c58d727c..17caa2058 100644
--- a/vendor/golang.org/x/net/http2/ascii.go
+++ b/vendor/golang.org/x/net/http2/ascii.go
@@ -6,6 +6,10 @@ package http2
import "strings"
+// The HTTP protocols are defined in terms of ASCII, not Unicode. This file
+// contains helper functions which may use Unicode-aware functions which would
+// otherwise be unsafe and could introduce vulnerabilities if used improperly.
+
// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
// are equal, ASCII-case-insensitively.
func asciiEqualFold(s, t string) bool {
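The new comment in ascii.go spells out why these helpers exist: HTTP header names and values are ASCII, and Unicode-aware folding such as strings.EqualFold can equate bytes that must stay distinct (for example, strings.EqualFold("K", "\u212A") is true because of the Kelvin sign), which is how careless case-folding turns into a correctness or security problem. A rough sketch of an ASCII-only fold, in the spirit of asciiEqualFold but not copied from the vendored file:

// asciiEqualFoldSketch compares two strings case-insensitively using ASCII
// rules only; anything outside A-Z/a-z must match byte-for-byte.
func asciiEqualFoldSketch(s, t string) bool {
    if len(s) != len(t) {
        return false
    }
    for i := 0; i < len(s); i++ {
        if lowerASCII(s[i]) != lowerASCII(t[i]) {
            return false
        }
    }
    return true
}

// lowerASCII lowercases a single ASCII byte and leaves all other bytes alone.
func lowerASCII(b byte) byte {
    if 'A' <= b && b <= 'Z' {
        return b + ('a' - 'A')
    }
    return b
}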
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 09bc70533..19e449cfa 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -259,16 +259,12 @@ func ConfigureServer(s *http.Server, conf *Server) error {
s.TLSConfig.PreferServerCipherSuites = true
- haveNPN := false
- for _, p := range s.TLSConfig.NextProtos {
- if p == NextProtoTLS {
- haveNPN = true
- break
- }
- }
- if !haveNPN {
+ if !strSliceContains(s.TLSConfig.NextProtos, NextProtoTLS) {
s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
}
+ if !strSliceContains(s.TLSConfig.NextProtos, "http/1.1") {
+ s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "http/1.1")
+ }
if s.TLSNextProto == nil {
s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
@@ -820,7 +816,7 @@ func (sc *serverConn) serve() {
})
sc.unackedSettings++
- // Each connection starts with intialWindowSize inflow tokens.
+ // Each connection starts with initialWindowSize inflow tokens.
// If a higher value is configured, we add more tokens.
if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
sc.sendWindowUpdate(nil, int(diff))
@@ -860,6 +856,15 @@ func (sc *serverConn) serve() {
case res := <-sc.wroteFrameCh:
sc.wroteFrame(res)
case res := <-sc.readFrameCh:
+ // Process any written frames before reading new frames from the client since a
+ // written frame could have triggered a new stream to be started.
+ if sc.writingFrameAsync {
+ select {
+ case wroteRes := <-sc.wroteFrameCh:
+ sc.wroteFrame(wroteRes)
+ default:
+ }
+ }
if !sc.processFrameFromReader(res) {
return
}
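The block added above is a non-blocking drain: before acting on a frame read from the client, the server applies the result of any asynchronous write that has already completed, since a completed write could have started a new stream. The idiom, reduced to a standalone sketch with illustrative names:

// writeResult stands in for the frame-write result type; illustrative only.
type writeResult struct{}

// drainOne applies at most one pending result without blocking: the default
// case makes the select fall through immediately when nothing is queued.
func drainOne(results <-chan writeResult, apply func(writeResult)) {
    select {
    case r := <-results:
        apply(r)
    default:
        // nothing pending; do not block on the channel
    }
}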
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 7bd4b9c19..b261beb1d 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -264,9 +264,8 @@ type ClientConn struct {
peerMaxHeaderListSize uint64
initialWindowSize uint32
- hbuf bytes.Buffer // HPACK encoder writes into this
- henc *hpack.Encoder
- freeBuf [][]byte
+ hbuf bytes.Buffer // HPACK encoder writes into this
+ henc *hpack.Encoder
wmu sync.Mutex // held while writing; acquire AFTER mu if holding both
werr error // first write error that has occurred
@@ -386,8 +385,13 @@ func (cs *clientStream) abortRequestBodyWrite(err error) {
}
cc := cs.cc
cc.mu.Lock()
- cs.stopReqBody = err
- cc.cond.Broadcast()
+ if cs.stopReqBody == nil {
+ cs.stopReqBody = err
+ if cs.req.Body != nil {
+ cs.req.Body.Close()
+ }
+ cc.cond.Broadcast()
+ }
cc.mu.Unlock()
}
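The rewritten abortRequestBodyWrite records only the first abort reason and closes the request body at most once; later calls become no-ops, so racing cancel paths cannot double-close the body or overwrite the original error. The guard as a standalone sketch (type and field names are illustrative; assumes "io" and "sync" from the standard library are imported):

// onceCloser closes an underlying body at most once, under a mutex, and
// remembers why it was stopped; subsequent aborts are ignored.
type onceCloser struct {
    mu      sync.Mutex
    stopErr error
    body    io.Closer
}

func (o *onceCloser) abort(err error) {
    o.mu.Lock()
    defer o.mu.Unlock()
    if o.stopErr != nil {
        return // already aborted; keep the first reason
    }
    o.stopErr = err
    if o.body != nil {
        o.body.Close()
    }
}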
@@ -913,46 +917,6 @@ func (cc *ClientConn) closeForLostPing() error {
return cc.closeForError(err)
}
-const maxAllocFrameSize = 512 << 10
-
-// frameBuffer returns a scratch buffer suitable for writing DATA frames.
-// They're capped at the min of the peer's max frame size or 512KB
-// (kinda arbitrarily), but definitely capped so we don't allocate 4GB
-// bufers.
-func (cc *ClientConn) frameScratchBuffer() []byte {
- cc.mu.Lock()
- size := cc.maxFrameSize
- if size > maxAllocFrameSize {
- size = maxAllocFrameSize
- }
- for i, buf := range cc.freeBuf {
- if len(buf) >= int(size) {
- cc.freeBuf[i] = nil
- cc.mu.Unlock()
- return buf[:size]
- }
- }
- cc.mu.Unlock()
- return make([]byte, size)
-}
-
-func (cc *ClientConn) putFrameScratchBuffer(buf []byte) {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate.
- if len(cc.freeBuf) < maxBufs {
- cc.freeBuf = append(cc.freeBuf, buf)
- return
- }
- for i, old := range cc.freeBuf {
- if old == nil {
- cc.freeBuf[i] = buf
- return
- }
- }
- // forget about it.
-}
-
// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests.
var errRequestCanceled = errors.New("net/http: request canceled")
@@ -1151,40 +1115,28 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
return res, false, nil
}
+ handleError := func(err error) (*http.Response, bool, error) {
+ if !hasBody || bodyWritten {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ } else {
+ bodyWriter.cancel()
+ cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
+ <-bodyWriter.resc
+ }
+ cc.forgetStreamID(cs.ID)
+ return nil, cs.getStartedWrite(), err
+ }
+
for {
select {
case re := <-readLoopResCh:
return handleReadLoopResponse(re)
case <-respHeaderTimer:
- if !hasBody || bodyWritten {
- cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
- } else {
- bodyWriter.cancel()
- cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
- <-bodyWriter.resc
- }
- cc.forgetStreamID(cs.ID)
- return nil, cs.getStartedWrite(), errTimeout
+ return handleError(errTimeout)
case <-ctx.Done():
- if !hasBody || bodyWritten {
- cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
- } else {
- bodyWriter.cancel()
- cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
- <-bodyWriter.resc
- }
- cc.forgetStreamID(cs.ID)
- return nil, cs.getStartedWrite(), ctx.Err()
+ return handleError(ctx.Err())
case <-req.Cancel:
- if !hasBody || bodyWritten {
- cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
- } else {
- bodyWriter.cancel()
- cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
- <-bodyWriter.resc
- }
- cc.forgetStreamID(cs.ID)
- return nil, cs.getStartedWrite(), errRequestCanceled
+ return handleError(errRequestCanceled)
case <-cs.peerReset:
// processResetStream already removed the
// stream from the streams map; no need for
@@ -1295,11 +1247,35 @@ var (
errReqBodyTooLong = errors.New("http2: request body larger than specified content length")
)
+// frameScratchBufferLen returns the length of a buffer to use for
+// outgoing request bodies to read/write to/from.
+//
+// It returns max(1, min(peer's advertised max frame size,
+// Request.ContentLength+1, 512KB)).
+func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int {
+ const max = 512 << 10
+ n := int64(maxFrameSize)
+ if n > max {
+ n = max
+ }
+ if cl := actualContentLength(cs.req); cl != -1 && cl+1 < n {
+ // Add an extra byte past the declared content-length to
+ // give the caller's Request.Body io.Reader a chance to
+ // give us more bytes than they declared, so we can catch it
+ // early.
+ n = cl + 1
+ }
+ if n < 1 {
+ return 1
+ }
+ return int(n) // doesn't truncate; max is 512K
+}
+
+var bufPool sync.Pool // of *[]byte
+
func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
cc := cs.cc
sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
- buf := cc.frameScratchBuffer()
- defer cc.putFrameScratchBuffer(buf)
defer func() {
traceWroteRequest(cs.trace, err)
@@ -1307,7 +1283,13 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
// Request.Body is closed by the Transport,
// and in multiple cases: server replies <=299 and >299
// while still writing request body
- cerr := bodyCloser.Close()
+ var cerr error
+ cc.mu.Lock()
+ if cs.stopReqBody == nil {
+ cs.stopReqBody = errStopReqBodyWrite
+ cerr = bodyCloser.Close()
+ }
+ cc.mu.Unlock()
if err == nil {
err = cerr
}
@@ -1318,9 +1300,24 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
remainLen := actualContentLength(req)
hasContentLen := remainLen != -1
+ cc.mu.Lock()
+ maxFrameSize := int(cc.maxFrameSize)
+ cc.mu.Unlock()
+
+ // Scratch buffer for reading into & writing from.
+ scratchLen := cs.frameScratchBufferLen(maxFrameSize)
+ var buf []byte
+ if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen {
+ defer bufPool.Put(bp)
+ buf = *bp
+ } else {
+ buf = make([]byte, scratchLen)
+ defer bufPool.Put(&buf)
+ }
+
var sawEOF bool
for !sawEOF {
- n, err := body.Read(buf[:len(buf)-1])
+ n, err := body.Read(buf[:len(buf)])
if hasContentLen {
remainLen -= int64(n)
if remainLen == 0 && err == nil {
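The per-connection freeBuf cache deleted earlier in this diff is replaced here by a package-level sync.Pool: a pooled *[]byte is reused only when it is at least scratchLen long, otherwise a fresh slice is allocated, and either way the buffer is returned to the pool when the body write finishes. Storing pointers to slices means each Put does not box a fresh copy of the slice header. The pattern, reduced to a standalone sketch (helper names are illustrative; assumes "sync" is imported):

var scratchPool sync.Pool // holds *[]byte

// getScratch returns a buffer of length n, reusing a pooled one when it is
// big enough, plus a put func that hands the buffer back to the pool.
func getScratch(n int) (buf []byte, put func()) {
    if bp, ok := scratchPool.Get().(*[]byte); ok && len(*bp) >= n {
        return (*bp)[:n], func() { scratchPool.Put(bp) }
    }
    b := make([]byte, n)
    return b, func() { scratchPool.Put(&b) }
}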
@@ -1331,8 +1328,9 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
// to send the END_STREAM bit early, double-check that we're actually
// at EOF. Subsequent reads should return (0, EOF) at this point.
// If either value is different, we return an error in one of two ways below.
+ var scratch [1]byte
var n1 int
- n1, err = body.Read(buf[n:])
+ n1, err = body.Read(scratch[:])
remainLen -= int64(n1)
}
if remainLen < 0 {
@@ -1402,10 +1400,6 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
}
}
- cc.mu.Lock()
- maxFrameSize := int(cc.maxFrameSize)
- cc.mu.Unlock()
-
cc.wmu.Lock()
defer cc.wmu.Unlock()