Diffstat (limited to 'vendor/k8s.io/apimachinery/pkg/util')
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/clock/clock.go  407
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/framer/framer.go  170
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go  19
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go  157
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go  187
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go  369
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go  120
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go  53
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go  379
9 files changed, 0 insertions, 1861 deletions
diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
deleted file mode 100644
index 3e1e2517b..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
+++ /dev/null
@@ -1,407 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package clock
-
-import (
- "sync"
- "time"
-)
-
-// PassiveClock allows for injecting fake or real clocks into code
-// that needs to read the current time but does not support scheduling
-// activity in the future.
-type PassiveClock interface {
- Now() time.Time
- Since(time.Time) time.Duration
-}
-
-// Clock allows for injecting fake or real clocks into code that
-// needs to do arbitrary things based on time.
-type Clock interface {
- PassiveClock
- After(time.Duration) <-chan time.Time
- NewTimer(time.Duration) Timer
- Sleep(time.Duration)
- NewTicker(time.Duration) Ticker
-}
-
-// RealClock really calls time.Now()
-type RealClock struct{}
-
-// Now returns the current time.
-func (RealClock) Now() time.Time {
- return time.Now()
-}
-
-// Since returns time since the specified timestamp.
-func (RealClock) Since(ts time.Time) time.Duration {
- return time.Since(ts)
-}
-
-// After is the same as time.After(d).
-func (RealClock) After(d time.Duration) <-chan time.Time {
- return time.After(d)
-}
-
-// NewTimer returns a new Timer.
-func (RealClock) NewTimer(d time.Duration) Timer {
- return &realTimer{
- timer: time.NewTimer(d),
- }
-}
-
-// NewTicker returns a new Ticker.
-func (RealClock) NewTicker(d time.Duration) Ticker {
- return &realTicker{
- ticker: time.NewTicker(d),
- }
-}
-
-// Sleep pauses the RealClock for duration d.
-func (RealClock) Sleep(d time.Duration) {
- time.Sleep(d)
-}
-
-// FakePassiveClock implements PassiveClock, but returns an arbitrary time.
-type FakePassiveClock struct {
- lock sync.RWMutex
- time time.Time
-}
-
-// FakeClock implements Clock, but returns an arbitrary time.
-type FakeClock struct {
- FakePassiveClock
-
- // waiters are waiting for the fake time to pass their specified time
- waiters []fakeClockWaiter
-}
-
-type fakeClockWaiter struct {
- targetTime time.Time
- stepInterval time.Duration
- skipIfBlocked bool
- destChan chan time.Time
-}
-
-// NewFakePassiveClock returns a new FakePassiveClock.
-func NewFakePassiveClock(t time.Time) *FakePassiveClock {
- return &FakePassiveClock{
- time: t,
- }
-}
-
-// NewFakeClock returns a new FakeClock
-func NewFakeClock(t time.Time) *FakeClock {
- return &FakeClock{
- FakePassiveClock: *NewFakePassiveClock(t),
- }
-}
-
-// Now returns f's time.
-func (f *FakePassiveClock) Now() time.Time {
- f.lock.RLock()
- defer f.lock.RUnlock()
- return f.time
-}
-
-// Since returns time since the time in f.
-func (f *FakePassiveClock) Since(ts time.Time) time.Duration {
- f.lock.RLock()
- defer f.lock.RUnlock()
- return f.time.Sub(ts)
-}
-
-// SetTime sets the time on the FakePassiveClock.
-func (f *FakePassiveClock) SetTime(t time.Time) {
- f.lock.Lock()
- defer f.lock.Unlock()
- f.time = t
-}
-
-// After is the Fake version of time.After(d).
-func (f *FakeClock) After(d time.Duration) <-chan time.Time {
- f.lock.Lock()
- defer f.lock.Unlock()
- stopTime := f.time.Add(d)
- ch := make(chan time.Time, 1) // Don't block!
- f.waiters = append(f.waiters, fakeClockWaiter{
- targetTime: stopTime,
- destChan: ch,
- })
- return ch
-}
-
-// NewTimer is the Fake version of time.NewTimer(d).
-func (f *FakeClock) NewTimer(d time.Duration) Timer {
- f.lock.Lock()
- defer f.lock.Unlock()
- stopTime := f.time.Add(d)
- ch := make(chan time.Time, 1) // Don't block!
- timer := &fakeTimer{
- fakeClock: f,
- waiter: fakeClockWaiter{
- targetTime: stopTime,
- destChan: ch,
- },
- }
- f.waiters = append(f.waiters, timer.waiter)
- return timer
-}
-
-// NewTicker returns a new Ticker.
-func (f *FakeClock) NewTicker(d time.Duration) Ticker {
- f.lock.Lock()
- defer f.lock.Unlock()
- tickTime := f.time.Add(d)
- ch := make(chan time.Time, 1) // hold one tick
- f.waiters = append(f.waiters, fakeClockWaiter{
- targetTime: tickTime,
- stepInterval: d,
- skipIfBlocked: true,
- destChan: ch,
- })
-
- return &fakeTicker{
- c: ch,
- }
-}
-
-// Step advances the clock by d and notifies anyone that has called After, NewTicker, or NewTimer.
-func (f *FakeClock) Step(d time.Duration) {
- f.lock.Lock()
- defer f.lock.Unlock()
- f.setTimeLocked(f.time.Add(d))
-}
-
-// SetTime sets the time on a FakeClock.
-func (f *FakeClock) SetTime(t time.Time) {
- f.lock.Lock()
- defer f.lock.Unlock()
- f.setTimeLocked(t)
-}
-
-// Actually changes the time and checks any waiters. f must be write-locked.
-func (f *FakeClock) setTimeLocked(t time.Time) {
- f.time = t
- newWaiters := make([]fakeClockWaiter, 0, len(f.waiters))
- for i := range f.waiters {
- w := &f.waiters[i]
- if !w.targetTime.After(t) {
-
- if w.skipIfBlocked {
- select {
- case w.destChan <- t:
- default:
- }
- } else {
- w.destChan <- t
- }
-
- if w.stepInterval > 0 {
- for !w.targetTime.After(t) {
- w.targetTime = w.targetTime.Add(w.stepInterval)
- }
- newWaiters = append(newWaiters, *w)
- }
-
- } else {
- newWaiters = append(newWaiters, f.waiters[i])
- }
- }
- f.waiters = newWaiters
-}
-
-// HasWaiters returns true if After has been called on f but not yet satisfied (so you can
-// write race-free tests).
-func (f *FakeClock) HasWaiters() bool {
- f.lock.RLock()
- defer f.lock.RUnlock()
- return len(f.waiters) > 0
-}
-
-// Sleep pauses the FakeClock for duration d.
-func (f *FakeClock) Sleep(d time.Duration) {
- f.Step(d)
-}
-
-// IntervalClock implements Clock, but each invocation of Now steps the clock forward the specified duration
-type IntervalClock struct {
- Time time.Time
- Duration time.Duration
-}
-
-// Now returns i's time.
-func (i *IntervalClock) Now() time.Time {
- i.Time = i.Time.Add(i.Duration)
- return i.Time
-}
-
-// Since returns time since the time in i.
-func (i *IntervalClock) Since(ts time.Time) time.Duration {
- return i.Time.Sub(ts)
-}
-
-// After is currently unimplemented, will panic.
-// TODO: make interval clock use FakeClock so this can be implemented.
-func (*IntervalClock) After(d time.Duration) <-chan time.Time {
- panic("IntervalClock doesn't implement After")
-}
-
-// NewTimer is currently unimplemented, will panic.
-// TODO: make interval clock use FakeClock so this can be implemented.
-func (*IntervalClock) NewTimer(d time.Duration) Timer {
- panic("IntervalClock doesn't implement NewTimer")
-}
-
-// NewTicker is currently unimplemented, will panic.
-// TODO: make interval clock use FakeClock so this can be implemented.
-func (*IntervalClock) NewTicker(d time.Duration) Ticker {
- panic("IntervalClock doesn't implement NewTicker")
-}
-
-// Sleep is currently unimplemented; will panic.
-func (*IntervalClock) Sleep(d time.Duration) {
- panic("IntervalClock doesn't implement Sleep")
-}
-
-// Timer allows for injecting fake or real timers into code that
-// needs to do arbitrary things based on time.
-type Timer interface {
- C() <-chan time.Time
- Stop() bool
- Reset(d time.Duration) bool
-}
-
-// realTimer is backed by an actual time.Timer.
-type realTimer struct {
- timer *time.Timer
-}
-
-// C returns the underlying timer's channel.
-func (r *realTimer) C() <-chan time.Time {
- return r.timer.C
-}
-
-// Stop calls Stop() on the underlying timer.
-func (r *realTimer) Stop() bool {
- return r.timer.Stop()
-}
-
-// Reset calls Reset() on the underlying timer.
-func (r *realTimer) Reset(d time.Duration) bool {
- return r.timer.Reset(d)
-}
-
-// fakeTimer implements Timer based on a FakeClock.
-type fakeTimer struct {
- fakeClock *FakeClock
- waiter fakeClockWaiter
-}
-
-// C returns the channel that notifies when this timer has fired.
-func (f *fakeTimer) C() <-chan time.Time {
- return f.waiter.destChan
-}
-
-// Stop conditionally stops the timer. If the timer has neither fired
-// nor been stopped then this call stops the timer and returns true,
-// otherwise this call returns false. This is like time.Timer::Stop.
-func (f *fakeTimer) Stop() bool {
- f.fakeClock.lock.Lock()
- defer f.fakeClock.lock.Unlock()
- // The timer has already fired or been stopped, unless it is found
- // among the clock's waiters.
- stopped := false
- oldWaiters := f.fakeClock.waiters
- newWaiters := make([]fakeClockWaiter, 0, len(oldWaiters))
- seekChan := f.waiter.destChan
- for i := range oldWaiters {
- // Identify the timer's fakeClockWaiter by the identity of the
- // destination channel, nothing else is necessarily unique and
- // constant since the timer's creation.
- if oldWaiters[i].destChan == seekChan {
- stopped = true
- } else {
- newWaiters = append(newWaiters, oldWaiters[i])
- }
- }
-
- f.fakeClock.waiters = newWaiters
-
- return stopped
-}
-
-// Reset conditionally updates the firing time of the timer. If the
-// timer has neither fired nor been stopped then this call resets the
-// timer to the fake clock's "now" + d and returns true, otherwise
-// it creates a new waiter, adds it to the clock, and returns true.
-//
-// It is not possible to return false, because a fake timer can be reset
-// from any state (waiting to fire, already fired, and stopped).
-//
-// See the GoDoc for time.Timer::Reset for more context on why
-// the return value of Reset() is not useful.
-func (f *fakeTimer) Reset(d time.Duration) bool {
- f.fakeClock.lock.Lock()
- defer f.fakeClock.lock.Unlock()
- waiters := f.fakeClock.waiters
- seekChan := f.waiter.destChan
- for i := range waiters {
- if waiters[i].destChan == seekChan {
- waiters[i].targetTime = f.fakeClock.time.Add(d)
- return true
- }
- }
- // No existing waiter, timer has already fired or been reset.
- // We should still enable Reset() to succeed by creating a
- // new waiter and adding it to the clock's waiters.
- newWaiter := fakeClockWaiter{
- targetTime: f.fakeClock.time.Add(d),
- destChan: seekChan,
- }
- f.fakeClock.waiters = append(f.fakeClock.waiters, newWaiter)
- return true
-}
-
-// Ticker defines the Ticker interface
-type Ticker interface {
- C() <-chan time.Time
- Stop()
-}
-
-type realTicker struct {
- ticker *time.Ticker
-}
-
-func (t *realTicker) C() <-chan time.Time {
- return t.ticker.C
-}
-
-func (t *realTicker) Stop() {
- t.ticker.Stop()
-}
-
-type fakeTicker struct {
- c <-chan time.Time
-}
-
-func (t *fakeTicker) C() <-chan time.Time {
- return t.c
-}
-
-func (t *fakeTicker) Stop() {
-}
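
For illustration, a minimal sketch (not part of the original diff) of how the removed clock package was typically consumed in tests, using only the FakeClock API deleted above; the import path is the vendored one that this change removes.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
)

func main() {
	start := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
	fc := clock.NewFakeClock(start)

	// After registers a waiter; nothing fires until the fake time is advanced.
	ch := fc.After(5 * time.Second)
	fmt.Println(fc.HasWaiters()) // true

	// Step advances the fake time past the target and delivers the new time.
	fc.Step(10 * time.Second)
	fmt.Println(<-ch) // 2020-01-01 00:00:10 +0000 UTC
}
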
diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
deleted file mode 100644
index 45aa74bf5..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package framer implements simple frame decoding techniques for an io.ReadCloser
-package framer
-
-import (
- "encoding/binary"
- "encoding/json"
- "io"
-)
-
-type lengthDelimitedFrameWriter struct {
- w io.Writer
- h [4]byte
-}
-
-func NewLengthDelimitedFrameWriter(w io.Writer) io.Writer {
- return &lengthDelimitedFrameWriter{w: w}
-}
-
-// Write writes a single frame to the nested writer, prepending it with the length
-// in bytes of data (as a 4-byte, big-endian uint32).
-func (w *lengthDelimitedFrameWriter) Write(data []byte) (int, error) {
- binary.BigEndian.PutUint32(w.h[:], uint32(len(data)))
- n, err := w.w.Write(w.h[:])
- if err != nil {
- return 0, err
- }
- if n != len(w.h) {
- return 0, io.ErrShortWrite
- }
- return w.w.Write(data)
-}
-
-type lengthDelimitedFrameReader struct {
- r io.ReadCloser
- remaining int
-}
-
-// NewLengthDelimitedFrameReader returns an io.Reader that will decode length-prefixed
-// frames off of a stream.
-//
-// The protocol is:
-//
-// stream: message ...
-// message: prefix body
-// prefix: 4 byte uint32 in BigEndian order, denotes length of body
-// body: bytes (0..prefix)
-//
-// If the buffer passed to Read is not long enough to contain an entire frame, io.ErrShortBuffer
-// will be returned along with the number of bytes read.
-func NewLengthDelimitedFrameReader(r io.ReadCloser) io.ReadCloser {
- return &lengthDelimitedFrameReader{r: r}
-}
-
-// Read attempts to read an entire frame into data. If that is not possible, io.ErrShortBuffer
-// is returned and subsequent calls will attempt to read the last frame. A frame is complete when
-// err is nil.
-func (r *lengthDelimitedFrameReader) Read(data []byte) (int, error) {
- if r.remaining <= 0 {
- header := [4]byte{}
- n, err := io.ReadAtLeast(r.r, header[:4], 4)
- if err != nil {
- return 0, err
- }
- if n != 4 {
- return 0, io.ErrUnexpectedEOF
- }
- frameLength := int(binary.BigEndian.Uint32(header[:]))
- r.remaining = frameLength
- }
-
- expect := r.remaining
- max := expect
- if max > len(data) {
- max = len(data)
- }
- n, err := io.ReadAtLeast(r.r, data[:max], int(max))
- r.remaining -= n
- if err == io.ErrShortBuffer || r.remaining > 0 {
- return n, io.ErrShortBuffer
- }
- if err != nil {
- return n, err
- }
- if n != expect {
- return n, io.ErrUnexpectedEOF
- }
-
- return n, nil
-}
-
-func (r *lengthDelimitedFrameReader) Close() error {
- return r.r.Close()
-}
-
-type jsonFrameReader struct {
- r io.ReadCloser
- decoder *json.Decoder
- remaining []byte
-}
-
-// NewJSONFramedReader returns an io.Reader that will decode individual JSON objects off
-// of a wire.
-//
-// The boundaries between each frame are valid JSON objects. A JSON parsing error will terminate
-// the read.
-func NewJSONFramedReader(r io.ReadCloser) io.ReadCloser {
- return &jsonFrameReader{
- r: r,
- decoder: json.NewDecoder(r),
- }
-}
-
-// Read decodes the next JSON object in the stream, or returns an error. The returned
-// byte slice will be modified the next time ReadFrame is invoked and should not be altered.
-func (r *jsonFrameReader) Read(data []byte) (int, error) {
- // Return whatever remaining data exists from an in progress frame
- if n := len(r.remaining); n > 0 {
- if n <= len(data) {
- //lint:ignore SA4006,SA4010 underlying array of data is modified here.
- data = append(data[0:0], r.remaining...)
- r.remaining = nil
- return n, nil
- }
-
- n = len(data)
- //lint:ignore SA4006,SA4010 underlying array of data is modified here.
- data = append(data[0:0], r.remaining[:n]...)
- r.remaining = r.remaining[n:]
- return n, io.ErrShortBuffer
- }
-
- // RawMessage#Unmarshal appends to data - we reset the slice down to 0 and will either see
- // data written to data, or be larger than data and a different array.
- n := len(data)
- m := json.RawMessage(data[:0])
- if err := r.decoder.Decode(&m); err != nil {
- return 0, err
- }
-
- // If capacity of data is less than length of the message, decoder will allocate a new slice
- // and set m to it, which means we need to copy the partial result back into data and preserve
- // the remaining result for subsequent reads.
- if len(m) > n {
- //lint:ignore SA4006,SA4010 underlying array of data is modified here.
- data = append(data[0:0], m[:n]...)
- r.remaining = m[n:]
- return n, io.ErrShortBuffer
- }
- return len(m), nil
-}
-
-func (r *jsonFrameReader) Close() error {
- return r.r.Close()
-}
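
A short illustrative sketch (assumed usage, not taken from the diff) of the length-delimited framing protocol documented above: each frame is a 4-byte big-endian length prefix followed by the frame body.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"k8s.io/apimachinery/pkg/util/framer"
)

func main() {
	var buf bytes.Buffer

	// The writer prepends each payload with its length as a 4-byte big-endian uint32.
	w := framer.NewLengthDelimitedFrameWriter(&buf)
	if _, err := w.Write([]byte("hello")); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", buf.Bytes()) // 00 00 00 05 68 65 6c 6c 6f

	// The reader strips the prefix and yields the original frame.
	r := framer.NewLengthDelimitedFrameReader(ioutil.NopCloser(&buf))
	frame, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(frame)) // hello
}
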
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go
deleted file mode 100644
index 5893df5bd..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package httpstream adds multiplexed streaming support to HTTP requests and
-// responses via connection upgrades.
-package httpstream // import "k8s.io/apimachinery/pkg/util/httpstream"
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
deleted file mode 100644
index 00ce5f785..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package httpstream
-
-import (
- "fmt"
- "io"
- "net/http"
- "strings"
- "time"
-)
-
-const (
- HeaderConnection = "Connection"
- HeaderUpgrade = "Upgrade"
- HeaderProtocolVersion = "X-Stream-Protocol-Version"
- HeaderAcceptedProtocolVersions = "X-Accepted-Stream-Protocol-Versions"
-)
-
-// NewStreamHandler defines a function that is called when a new Stream is
-// received. If no error is returned, the Stream is accepted; otherwise,
-// the stream is rejected. After the reply frame has been sent, replySent is closed.
-type NewStreamHandler func(stream Stream, replySent <-chan struct{}) error
-
-// NoOpNewStreamHandler is a stream handler that accepts a new stream and
-// performs no other logic.
-func NoOpNewStreamHandler(stream Stream, replySent <-chan struct{}) error { return nil }
-
-// Dialer knows how to open a streaming connection to a server.
-type Dialer interface {
-
- // Dial opens a streaming connection to a server using one of the protocols
- // specified (in order of most preferred to least preferred).
- Dial(protocols ...string) (Connection, string, error)
-}
-
-// UpgradeRoundTripper is a type of http.RoundTripper that is able to upgrade
-// HTTP requests to support multiplexed bidirectional streams. After RoundTrip()
-// is invoked, if the upgrade is successful, clients may retrieve the upgraded
-// connection by calling UpgradeRoundTripper.Connection().
-type UpgradeRoundTripper interface {
- http.RoundTripper
- // NewConnection validates the response and creates a new Connection.
- NewConnection(resp *http.Response) (Connection, error)
-}
-
-// ResponseUpgrader knows how to upgrade HTTP requests and responses to
-// add streaming support to them.
-type ResponseUpgrader interface {
- // UpgradeResponse upgrades an HTTP response to one that supports multiplexed
- // streams. newStreamHandler will be called asynchronously whenever the
- // other end of the upgraded connection creates a new stream.
- UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler NewStreamHandler) Connection
-}
-
-// Connection represents an upgraded HTTP connection.
-type Connection interface {
- // CreateStream creates a new Stream with the supplied headers.
- CreateStream(headers http.Header) (Stream, error)
- // Close resets all streams and closes the connection.
- Close() error
- // CloseChan returns a channel that is closed when the underlying connection is closed.
- CloseChan() <-chan bool
- // SetIdleTimeout sets the amount of time the connection may remain idle before
- // it is automatically closed.
- SetIdleTimeout(timeout time.Duration)
-}
-
-// Stream represents a bidirectional communications channel that is part of an
-// upgraded connection.
-type Stream interface {
- io.ReadWriteCloser
- // Reset closes both directions of the stream, indicating that neither client
- // nor server can use it any more.
- Reset() error
- // Headers returns the headers used to create the stream.
- Headers() http.Header
- // Identifier returns the stream's ID.
- Identifier() uint32
-}
-
-// IsUpgradeRequest returns true if the given request is a connection upgrade request
-func IsUpgradeRequest(req *http.Request) bool {
- for _, h := range req.Header[http.CanonicalHeaderKey(HeaderConnection)] {
- if strings.Contains(strings.ToLower(h), strings.ToLower(HeaderUpgrade)) {
- return true
- }
- }
- return false
-}
-
-func negotiateProtocol(clientProtocols, serverProtocols []string) string {
- for i := range clientProtocols {
- for j := range serverProtocols {
- if clientProtocols[i] == serverProtocols[j] {
- return clientProtocols[i]
- }
- }
- }
- return ""
-}
-
-func commaSeparatedHeaderValues(header []string) []string {
- var parsedClientProtocols []string
- for i := range header {
- for _, clientProtocol := range strings.Split(header[i], ",") {
- if proto := strings.Trim(clientProtocol, " "); len(proto) > 0 {
- parsedClientProtocols = append(parsedClientProtocols, proto)
- }
- }
- }
- return parsedClientProtocols
-}
-
-// Handshake performs a subprotocol negotiation. If the client did request a
-// subprotocol, Handshake will select the first common value found in
-// serverProtocols. If a match is found, Handshake adds a response header
-// indicating the chosen subprotocol. If no match is found, HTTP forbidden is
-// returned, along with a response header containing the list of protocols the
-// server can accept.
-func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []string) (string, error) {
- clientProtocols := commaSeparatedHeaderValues(req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)])
- if len(clientProtocols) == 0 {
- return "", fmt.Errorf("unable to upgrade: %s is required", HeaderProtocolVersion)
- }
-
- if len(serverProtocols) == 0 {
- panic(fmt.Errorf("unable to upgrade: serverProtocols is required"))
- }
-
- negotiatedProtocol := negotiateProtocol(clientProtocols, serverProtocols)
- if len(negotiatedProtocol) == 0 {
- for i := range serverProtocols {
- w.Header().Add(HeaderAcceptedProtocolVersions, serverProtocols[i])
- }
- err := fmt.Errorf("unable to upgrade: unable to negotiate protocol: client supports %v, server accepts %v", clientProtocols, serverProtocols)
- http.Error(w, err.Error(), http.StatusForbidden)
- return "", err
- }
-
- w.Header().Add(HeaderProtocolVersion, negotiatedProtocol)
- return negotiatedProtocol, nil
-}
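
A hedged sketch (illustration only) of the Handshake negotiation removed above, exercised against net/http/httptest; the subprotocol names are the remotecommand constants that appear later in this diff.

package main

import (
	"fmt"
	"net/http/httptest"

	"k8s.io/apimachinery/pkg/util/httpstream"
)

func main() {
	// The client advertises its supported versions, most preferred first.
	req := httptest.NewRequest("POST", "/exec", nil)
	req.Header.Set(httpstream.HeaderProtocolVersion, "v4.channel.k8s.io, v3.channel.k8s.io")

	// Handshake picks the first client value the server also supports and echoes it
	// back in X-Stream-Protocol-Version; a mismatch would yield a 403 response.
	w := httptest.NewRecorder()
	chosen, err := httpstream.Handshake(req, w, []string{"v3.channel.k8s.io", "v4.channel.k8s.io"})
	fmt.Println(chosen, err)                                      // v4.channel.k8s.io <nil>
	fmt.Println(w.Header().Get(httpstream.HeaderProtocolVersion)) // v4.channel.k8s.io
}
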
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go
deleted file mode 100644
index 336b4908b..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spdy
-
-import (
- "net"
- "net/http"
- "sync"
- "time"
-
- "github.com/docker/spdystream"
- "k8s.io/apimachinery/pkg/util/httpstream"
- "k8s.io/klog/v2"
-)
-
-// connection maintains state about a spdystream.Connection and its associated
-// streams.
-type connection struct {
- conn *spdystream.Connection
- streams []httpstream.Stream
- streamLock sync.Mutex
- newStreamHandler httpstream.NewStreamHandler
- ping func() (time.Duration, error)
-}
-
-// NewClientConnection creates a new SPDY client connection.
-func NewClientConnection(conn net.Conn) (httpstream.Connection, error) {
- return NewClientConnectionWithPings(conn, 0)
-}
-
-// NewClientConnectionWithPings creates a new SPDY client connection.
-//
-// If pingPeriod is non-zero, a background goroutine will send periodic Ping
-// frames to the server. Use this to keep idle connections through certain load
-// balancers alive longer.
-func NewClientConnectionWithPings(conn net.Conn, pingPeriod time.Duration) (httpstream.Connection, error) {
- spdyConn, err := spdystream.NewConnection(conn, false)
- if err != nil {
- defer conn.Close()
- return nil, err
- }
-
- return newConnection(spdyConn, httpstream.NoOpNewStreamHandler, pingPeriod, spdyConn.Ping), nil
-}
-
-// NewServerConnection creates a new SPDY server connection. newStreamHandler
-// will be invoked when the server receives a newly created stream from the
-// client.
-func NewServerConnection(conn net.Conn, newStreamHandler httpstream.NewStreamHandler) (httpstream.Connection, error) {
- return NewServerConnectionWithPings(conn, newStreamHandler, 0)
-}
-
-// NewServerConnectionWithPings creates a new SPDY server connection.
-// newStreamHandler will be invoked when the server receives a newly created
-// stream from the client.
-//
-// If pingPeriod is non-zero, a background goroutine will send periodic Ping
-// frames to the client. Use this to keep idle connections through certain load
-// balancers alive longer.
-func NewServerConnectionWithPings(conn net.Conn, newStreamHandler httpstream.NewStreamHandler, pingPeriod time.Duration) (httpstream.Connection, error) {
- spdyConn, err := spdystream.NewConnection(conn, true)
- if err != nil {
- defer conn.Close()
- return nil, err
- }
-
- return newConnection(spdyConn, newStreamHandler, pingPeriod, spdyConn.Ping), nil
-}
-
-// newConnection returns a new connection wrapping conn. newStreamHandler
-// will be invoked when the server receives a newly created stream from the
-// client.
-func newConnection(conn *spdystream.Connection, newStreamHandler httpstream.NewStreamHandler, pingPeriod time.Duration, pingFn func() (time.Duration, error)) httpstream.Connection {
- c := &connection{conn: conn, newStreamHandler: newStreamHandler, ping: pingFn}
- go conn.Serve(c.newSpdyStream)
- if pingPeriod > 0 && pingFn != nil {
- go c.sendPings(pingPeriod)
- }
- return c
-}
-
-// createStreamResponseTimeout indicates how long to wait for the other side to
-// acknowledge the new stream before timing out.
-const createStreamResponseTimeout = 30 * time.Second
-
-// Close first sends a reset for all of the connection's streams, and then
-// closes the underlying spdystream.Connection.
-func (c *connection) Close() error {
- c.streamLock.Lock()
- for _, s := range c.streams {
- // calling Reset instead of Close ensures that all streams are fully torn down
- s.Reset()
- }
- c.streams = make([]httpstream.Stream, 0)
- c.streamLock.Unlock()
-
- // now that all streams are fully torn down, it's safe to call close on the underlying connection,
- // which should be able to terminate immediately at this point, instead of waiting for any
- // remaining graceful stream termination.
- return c.conn.Close()
-}
-
-// CreateStream creates a new stream with the specified headers and registers
-// it with the connection.
-func (c *connection) CreateStream(headers http.Header) (httpstream.Stream, error) {
- stream, err := c.conn.CreateStream(headers, nil, false)
- if err != nil {
- return nil, err
- }
- if err = stream.WaitTimeout(createStreamResponseTimeout); err != nil {
- return nil, err
- }
-
- c.registerStream(stream)
- return stream, nil
-}
-
-// registerStream adds the stream s to the connection's list of streams that
-// it owns.
-func (c *connection) registerStream(s httpstream.Stream) {
- c.streamLock.Lock()
- c.streams = append(c.streams, s)
- c.streamLock.Unlock()
-}
-
-// CloseChan returns a channel that, when closed, indicates that the underlying
-// spdystream.Connection has been closed.
-func (c *connection) CloseChan() <-chan bool {
- return c.conn.CloseChan()
-}
-
-// newSpdyStream is the internal new stream handler used by spdystream.Connection.Serve.
-// It calls connection's newStreamHandler, giving it the opportunity to accept or reject
-// the stream. If newStreamHandler returns an error, the stream is rejected. If not, the
-// stream is accepted and registered with the connection.
-func (c *connection) newSpdyStream(stream *spdystream.Stream) {
- replySent := make(chan struct{})
- err := c.newStreamHandler(stream, replySent)
- rejectStream := (err != nil)
- if rejectStream {
- klog.Warningf("Stream rejected: %v", err)
- stream.Reset()
- return
- }
-
- c.registerStream(stream)
- stream.SendReply(http.Header{}, rejectStream)
- close(replySent)
-}
-
-// SetIdleTimeout sets the amount of time the connection may remain idle before
-// it is automatically closed.
-func (c *connection) SetIdleTimeout(timeout time.Duration) {
- c.conn.SetIdleTimeout(timeout)
-}
-
-func (c *connection) sendPings(period time.Duration) {
- t := time.NewTicker(period)
- defer t.Stop()
- for {
- select {
- case <-c.conn.CloseChan():
- return
- case <-t.C:
- }
- if _, err := c.ping(); err != nil {
- klog.V(3).Infof("SPDY Ping failed: %v", err)
- // Continue, in case this is a transient failure.
- // c.conn.CloseChan above will tell us when the connection is
- // actually closed.
- }
- }
-}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
deleted file mode 100644
index 4cb1cfadc..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
+++ /dev/null
@@ -1,369 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spdy
-
-import (
- "bufio"
- "bytes"
- "context"
- "crypto/tls"
- "encoding/base64"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/http/httputil"
- "net/url"
- "strings"
- "time"
-
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/serializer"
- "k8s.io/apimachinery/pkg/util/httpstream"
- utilnet "k8s.io/apimachinery/pkg/util/net"
- "k8s.io/apimachinery/third_party/forked/golang/netutil"
-)
-
-// SpdyRoundTripper knows how to upgrade an HTTP request to one that supports
-// multiplexed streams. After RoundTrip() is invoked, Conn will be set
-// and usable. SpdyRoundTripper implements the UpgradeRoundTripper interface.
-type SpdyRoundTripper struct {
- //tlsConfig holds the TLS configuration settings to use when connecting
- //to the remote server.
- tlsConfig *tls.Config
-
- /* TODO according to http://golang.org/pkg/net/http/#RoundTripper, a RoundTripper
- must be safe for use by multiple concurrent goroutines. If this is absolutely
- necessary, we could keep a map from http.Request to net.Conn. In practice,
- a client will create an http.Client, set the transport to a new instance of
- SpdyRoundTripper, and use it a single time, so this hopefully won't be an issue.
- */
- // conn is the underlying network connection to the remote server.
- conn net.Conn
-
- // Dialer is the dialer used to connect. Used if non-nil.
- Dialer *net.Dialer
-
- // proxier knows which proxy to use given a request, defaults to http.ProxyFromEnvironment
- // Used primarily for mocking the proxy discovery in tests.
- proxier func(req *http.Request) (*url.URL, error)
-
- // followRedirects indicates if the round tripper should examine responses for redirects and
- // follow them.
- followRedirects bool
- // requireSameHostRedirects restricts redirect following to only follow redirects to the same host
- // as the original request.
- requireSameHostRedirects bool
- // pingPeriod is a period for sending Ping frames over established
- // connections.
- pingPeriod time.Duration
-}
-
-var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{}
-var _ httpstream.UpgradeRoundTripper = &SpdyRoundTripper{}
-var _ utilnet.Dialer = &SpdyRoundTripper{}
-
-// NewRoundTripper creates a new SpdyRoundTripper that will use the specified
-// tlsConfig.
-func NewRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) *SpdyRoundTripper {
- return NewRoundTripperWithConfig(RoundTripperConfig{
- TLS: tlsConfig,
- FollowRedirects: followRedirects,
- RequireSameHostRedirects: requireSameHostRedirects,
- })
-}
-
-// NewRoundTripperWithProxy creates a new SpdyRoundTripper that will use the
-// specified tlsConfig and proxy func.
-func NewRoundTripperWithProxy(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool, proxier func(*http.Request) (*url.URL, error)) *SpdyRoundTripper {
- return NewRoundTripperWithConfig(RoundTripperConfig{
- TLS: tlsConfig,
- FollowRedirects: followRedirects,
- RequireSameHostRedirects: requireSameHostRedirects,
- Proxier: proxier,
- })
-}
-
-// NewRoundTripperWithConfig creates a new SpdyRoundTripper with the specified
-// configuration.
-func NewRoundTripperWithConfig(cfg RoundTripperConfig) *SpdyRoundTripper {
- if cfg.Proxier == nil {
- cfg.Proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
- }
- return &SpdyRoundTripper{
- tlsConfig: cfg.TLS,
- followRedirects: cfg.FollowRedirects,
- requireSameHostRedirects: cfg.RequireSameHostRedirects,
- proxier: cfg.Proxier,
- pingPeriod: cfg.PingPeriod,
- }
-}
-
-// RoundTripperConfig is a set of options for an SpdyRoundTripper.
-type RoundTripperConfig struct {
- // TLS configuration used by the round tripper.
- TLS *tls.Config
- // Proxier is a proxy function invoked on each request. Optional.
- Proxier func(*http.Request) (*url.URL, error)
- // PingPeriod is a period for sending SPDY Pings on the connection.
- // Optional.
- PingPeriod time.Duration
-
- FollowRedirects bool
- RequireSameHostRedirects bool
-}
-
-// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during
-// proxying with a spdy roundtripper.
-func (s *SpdyRoundTripper) TLSClientConfig() *tls.Config {
- return s.tlsConfig
-}
-
-// Dial implements k8s.io/apimachinery/pkg/util/net.Dialer.
-func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) {
- conn, err := s.dial(req)
- if err != nil {
- return nil, err
- }
-
- if err := req.Write(conn); err != nil {
- conn.Close()
- return nil, err
- }
-
- return conn, nil
-}
-
-// dial dials the host specified by req, using TLS if appropriate, optionally
-// using a proxy server if one is configured via environment variables.
-func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
- proxyURL, err := s.proxier(req)
- if err != nil {
- return nil, err
- }
-
- if proxyURL == nil {
- return s.dialWithoutProxy(req.Context(), req.URL)
- }
-
- // ensure we use a canonical host with proxyReq
- targetHost := netutil.CanonicalAddr(req.URL)
-
- // proxying logic adapted from http://blog.h6t.eu/post/74098062923/golang-websocket-with-http-proxy-support
- proxyReq := http.Request{
- Method: "CONNECT",
- URL: &url.URL{},
- Host: targetHost,
- }
-
- if pa := s.proxyAuth(proxyURL); pa != "" {
- proxyReq.Header = http.Header{}
- proxyReq.Header.Set("Proxy-Authorization", pa)
- }
-
- proxyDialConn, err := s.dialWithoutProxy(req.Context(), proxyURL)
- if err != nil {
- return nil, err
- }
-
- proxyClientConn := httputil.NewProxyClientConn(proxyDialConn, nil)
- _, err = proxyClientConn.Do(&proxyReq)
- if err != nil && err != httputil.ErrPersistEOF {
- return nil, err
- }
-
- rwc, _ := proxyClientConn.Hijack()
-
- if req.URL.Scheme != "https" {
- return rwc, nil
- }
-
- host, _, err := net.SplitHostPort(targetHost)
- if err != nil {
- return nil, err
- }
-
- tlsConfig := s.tlsConfig
- switch {
- case tlsConfig == nil:
- tlsConfig = &tls.Config{ServerName: host}
- case len(tlsConfig.ServerName) == 0:
- tlsConfig = tlsConfig.Clone()
- tlsConfig.ServerName = host
- }
-
- tlsConn := tls.Client(rwc, tlsConfig)
-
- // need to manually call Handshake() so we can call VerifyHostname() below
- if err := tlsConn.Handshake(); err != nil {
- return nil, err
- }
-
- // Return if we were configured to skip validation
- if tlsConfig.InsecureSkipVerify {
- return tlsConn, nil
- }
-
- if err := tlsConn.VerifyHostname(tlsConfig.ServerName); err != nil {
- return nil, err
- }
-
- return tlsConn, nil
-}
-
-// dialWithoutProxy dials the host specified by url, using TLS if appropriate.
-func (s *SpdyRoundTripper) dialWithoutProxy(ctx context.Context, url *url.URL) (net.Conn, error) {
- dialAddr := netutil.CanonicalAddr(url)
-
- if url.Scheme == "http" {
- if s.Dialer == nil {
- var d net.Dialer
- return d.DialContext(ctx, "tcp", dialAddr)
- } else {
- return s.Dialer.DialContext(ctx, "tcp", dialAddr)
- }
- }
-
- // TODO validate the TLSClientConfig is set up?
- var conn *tls.Conn
- var err error
- if s.Dialer == nil {
- conn, err = tls.Dial("tcp", dialAddr, s.tlsConfig)
- } else {
- conn, err = tls.DialWithDialer(s.Dialer, "tcp", dialAddr, s.tlsConfig)
- }
- if err != nil {
- return nil, err
- }
-
- // Return if we were configured to skip validation
- if s.tlsConfig != nil && s.tlsConfig.InsecureSkipVerify {
- return conn, nil
- }
-
- host, _, err := net.SplitHostPort(dialAddr)
- if err != nil {
- return nil, err
- }
- if s.tlsConfig != nil && len(s.tlsConfig.ServerName) > 0 {
- host = s.tlsConfig.ServerName
- }
- err = conn.VerifyHostname(host)
- if err != nil {
- return nil, err
- }
-
- return conn, nil
-}
-
-// proxyAuth returns, for a given proxy URL, the value to be used for the Proxy-Authorization header
-func (s *SpdyRoundTripper) proxyAuth(proxyURL *url.URL) string {
- if proxyURL == nil || proxyURL.User == nil {
- return ""
- }
- credentials := proxyURL.User.String()
- encodedAuth := base64.StdEncoding.EncodeToString([]byte(credentials))
- return fmt.Sprintf("Basic %s", encodedAuth)
-}
-
-// RoundTrip executes the Request and upgrades it. After a successful upgrade,
-// clients may call SpdyRoundTripper.Connection() to retrieve the upgraded
-// connection.
-func (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
- header := utilnet.CloneHeader(req.Header)
- header.Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
- header.Add(httpstream.HeaderUpgrade, HeaderSpdy31)
-
- var (
- conn net.Conn
- rawResponse []byte
- err error
- )
-
- if s.followRedirects {
- conn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, req.URL, header, req.Body, s, s.requireSameHostRedirects)
- } else {
- clone := utilnet.CloneRequest(req)
- clone.Header = header
- conn, err = s.Dial(clone)
- }
- if err != nil {
- return nil, err
- }
-
- responseReader := bufio.NewReader(
- io.MultiReader(
- bytes.NewBuffer(rawResponse),
- conn,
- ),
- )
-
- resp, err := http.ReadResponse(responseReader, nil)
- if err != nil {
- if conn != nil {
- conn.Close()
- }
- return nil, err
- }
-
- s.conn = conn
-
- return resp, nil
-}
-
-// NewConnection validates the upgrade response, creating and returning a new
-// httpstream.Connection if there were no errors.
-func (s *SpdyRoundTripper) NewConnection(resp *http.Response) (httpstream.Connection, error) {
- connectionHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderConnection))
- upgradeHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderUpgrade))
- if (resp.StatusCode != http.StatusSwitchingProtocols) || !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
- defer resp.Body.Close()
- responseError := ""
- responseErrorBytes, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- responseError = "unable to read error from server response"
- } else {
- // TODO: I don't belong here, I should be abstracted from this class
- if obj, _, err := statusCodecs.UniversalDecoder().Decode(responseErrorBytes, nil, &metav1.Status{}); err == nil {
- if status, ok := obj.(*metav1.Status); ok {
- return nil, &apierrors.StatusError{ErrStatus: *status}
- }
- }
- responseError = string(responseErrorBytes)
- responseError = strings.TrimSpace(responseError)
- }
-
- return nil, fmt.Errorf("unable to upgrade connection: %s", responseError)
- }
-
- return NewClientConnectionWithPings(s.conn, s.pingPeriod)
-}
-
-// statusScheme is a private scheme for the decoding here until someone fixes the TODO in NewConnection
-var statusScheme = runtime.NewScheme()
-
-// statusCodecs is a codec factory used to decode metav1.Status objects in NewConnection.
-var statusCodecs = serializer.NewCodecFactory(statusScheme)
-
-func init() {
- statusScheme.AddUnversionedTypes(metav1.SchemeGroupVersion,
- &metav1.Status{},
- )
-}
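
A rough sketch of the client-side upgrade flow the deleted SpdyRoundTripper provided. The target URL and TLS settings below are placeholders, and error handling is reduced to panics; this is an illustration of the API surface shown above, not the canonical client.

package main

import (
	"crypto/tls"
	"net/http"
	"net/url"

	"k8s.io/apimachinery/pkg/util/httpstream/spdy"
)

func main() {
	// followRedirects=true, requireSameHostRedirects=false, per the deleted signature.
	rt := spdy.NewRoundTripper(&tls.Config{InsecureSkipVerify: true}, true, false)

	u, _ := url.Parse("https://example.invalid/api/v1/namespaces/default/pods/mypod/exec")
	req := &http.Request{Method: "POST", URL: u, Header: http.Header{}}

	// RoundTrip sends the Connection/Upgrade headers and records the raw net.Conn.
	resp, err := rt.RoundTrip(req)
	if err != nil {
		panic(err)
	}

	// NewConnection checks for 101 Switching Protocols and wraps the connection.
	conn, err := rt.NewConnection(resp)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Streams are multiplexed over the single upgraded connection.
	stream, err := conn.CreateStream(http.Header{})
	if err != nil {
		panic(err)
	}
	defer stream.Close()
}
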
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
deleted file mode 100644
index f17eb09e9..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spdy
-
-import (
- "bufio"
- "fmt"
- "io"
- "net"
- "net/http"
- "strings"
- "sync/atomic"
- "time"
-
- "k8s.io/apimachinery/pkg/util/httpstream"
- "k8s.io/apimachinery/pkg/util/runtime"
-)
-
-const HeaderSpdy31 = "SPDY/3.1"
-
-// responseUpgrader knows how to upgrade HTTP responses. It
-// implements the httpstream.ResponseUpgrader interface.
-type responseUpgrader struct {
- pingPeriod time.Duration
-}
-
-// connWrapper is used to wrap a hijacked connection and its bufio.Reader. All
-// calls will be handled directly by the underlying net.Conn with the exception
-// of Read and Close calls, which will consider data in the bufio.Reader. This
-// ensures that data already inside the used bufio.Reader instance is also
-// read.
-type connWrapper struct {
- net.Conn
- closed int32
- bufReader *bufio.Reader
-}
-
-func (w *connWrapper) Read(b []byte) (n int, err error) {
- if atomic.LoadInt32(&w.closed) == 1 {
- return 0, io.EOF
- }
- return w.bufReader.Read(b)
-}
-
-func (w *connWrapper) Close() error {
- err := w.Conn.Close()
- atomic.StoreInt32(&w.closed, 1)
- return err
-}
-
-// NewResponseUpgrader returns a new httpstream.ResponseUpgrader that is
-// capable of upgrading HTTP responses using SPDY/3.1 via the
-// spdystream package.
-func NewResponseUpgrader() httpstream.ResponseUpgrader {
- return NewResponseUpgraderWithPings(0)
-}
-
-// NewResponseUpgraderWithPings returns a new httpstream.ResponseUpgrader that
-// is capable of upgrading HTTP responses using SPDY/3.1 via the spdystream
-// package.
-//
-// If pingPeriod is non-zero, for each incoming connection a background
-// goroutine will send periodic Ping frames to the client. Use this to keep
-// idle connections through certain load balancers alive longer.
-func NewResponseUpgraderWithPings(pingPeriod time.Duration) httpstream.ResponseUpgrader {
- return responseUpgrader{pingPeriod: pingPeriod}
-}
-
-// UpgradeResponse upgrades an HTTP response to one that supports multiplexed
-// streams. newStreamHandler will be called asynchronously whenever the
-// other end of the upgraded connection creates a new stream.
-func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection {
- connectionHeader := strings.ToLower(req.Header.Get(httpstream.HeaderConnection))
- upgradeHeader := strings.ToLower(req.Header.Get(httpstream.HeaderUpgrade))
- if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
- errorMsg := fmt.Sprintf("unable to upgrade: missing upgrade headers in request: %#v", req.Header)
- http.Error(w, errorMsg, http.StatusBadRequest)
- return nil
- }
-
- hijacker, ok := w.(http.Hijacker)
- if !ok {
- errorMsg := fmt.Sprintf("unable to upgrade: unable to hijack response")
- http.Error(w, errorMsg, http.StatusInternalServerError)
- return nil
- }
-
- w.Header().Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
- w.Header().Add(httpstream.HeaderUpgrade, HeaderSpdy31)
- w.WriteHeader(http.StatusSwitchingProtocols)
-
- conn, bufrw, err := hijacker.Hijack()
- if err != nil {
- runtime.HandleError(fmt.Errorf("unable to upgrade: error hijacking response: %v", err))
- return nil
- }
-
- connWithBuf := &connWrapper{Conn: conn, bufReader: bufrw.Reader}
- spdyConn, err := NewServerConnectionWithPings(connWithBuf, newStreamHandler, u.pingPeriod)
- if err != nil {
- runtime.HandleError(fmt.Errorf("unable to upgrade: error creating SPDY server connection: %v", err))
- return nil
- }
-
- return spdyConn
-}
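
The matching server-side sketch (illustrative only, with placeholder path and port): UpgradeResponse hijacks the HTTP connection, new streams are accepted via a no-op style handler, and the HTTP handler blocks until the upgraded connection closes.

package main

import (
	"net/http"

	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/apimachinery/pkg/util/httpstream/spdy"
)

func main() {
	upgrader := spdy.NewResponseUpgrader()

	http.HandleFunc("/exec", func(w http.ResponseWriter, req *http.Request) {
		// New streams from the client are handed to the callback, which may
		// accept (nil) or reject (error) each one.
		conn := upgrader.UpgradeResponse(w, req, func(stream httpstream.Stream, replySent <-chan struct{}) error {
			return nil // accept every stream; a real handler would inspect stream.Headers()
		})
		if conn == nil {
			return // UpgradeResponse already wrote an error response
		}
		defer conn.Close()
		<-conn.CloseChan() // keep the handler alive until the SPDY connection closes
	})

	_ = http.ListenAndServe(":8080", nil)
}
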
diff --git a/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go
deleted file mode 100644
index acfeb827c..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package remotecommand
-
-import (
- "time"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-const (
- DefaultStreamCreationTimeout = 30 * time.Second
-
- // The SPDY subprotocol "channel.k8s.io" is used for remote command
- // attachment/execution. This represents the initial unversioned subprotocol,
- // which has the known bugs http://issues.k8s.io/13394 and
- // http://issues.k8s.io/13395.
- StreamProtocolV1Name = "channel.k8s.io"
-
- // The SPDY subprotocol "v2.channel.k8s.io" is used for remote command
- // attachment/execution. It is the second version of the subprotocol and
- // resolves the issues present in the first version.
- StreamProtocolV2Name = "v2.channel.k8s.io"
-
- // The SPDY subprotocol "v3.channel.k8s.io" is used for remote command
- // attachment/execution. It is the third version of the subprotocol and
- // adds support for resizing container terminals.
- StreamProtocolV3Name = "v3.channel.k8s.io"
-
- // The SPDY subprotocol "v4.channel.k8s.io" is used for remote command
- // attachment/execution. It is the 4th version of the subprotocol and
- // adds support for exit codes.
- StreamProtocolV4Name = "v4.channel.k8s.io"
-
- NonZeroExitCodeReason = metav1.StatusReason("NonZeroExitCode")
- ExitCodeCauseType = metav1.CauseType("ExitCode")
-)
-
-var SupportedStreamingProtocols = []string{StreamProtocolV4Name, StreamProtocolV3Name, StreamProtocolV2Name, StreamProtocolV1Name}
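
These version constants are listed newest-first in SupportedStreamingProtocols so that a server-side Handshake (see httpstream.go above) prefers the most capable mutually supported subprotocol. A trivial sketch, for illustration only:

package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/remotecommand"
)

func main() {
	// A client sends this list in X-Stream-Protocol-Version, most preferred first.
	fmt.Println(strings.Join(remotecommand.SupportedStreamingProtocols, ", "))
	// Output: v4.channel.k8s.io, v3.channel.k8s.io, v2.channel.k8s.io, channel.k8s.io
}
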
diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
deleted file mode 100644
index 7fe706467..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package yaml
-
-import (
- "bufio"
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "strings"
- "unicode"
-
- jsonutil "k8s.io/apimachinery/pkg/util/json"
-
- "k8s.io/klog/v2"
- "sigs.k8s.io/yaml"
-)
-
-// Unmarshal unmarshals the given data.
-// If v is a *map[string]interface{}, *[]interface{}, or *interface{}, numbers
-// are converted to int64 or float64.
-func Unmarshal(data []byte, v interface{}) error {
- preserveIntFloat := func(d *json.Decoder) *json.Decoder {
- d.UseNumber()
- return d
- }
- switch v := v.(type) {
- case *map[string]interface{}:
- if err := yaml.Unmarshal(data, v, preserveIntFloat); err != nil {
- return err
- }
- return jsonutil.ConvertMapNumbers(*v, 0)
- case *[]interface{}:
- if err := yaml.Unmarshal(data, v, preserveIntFloat); err != nil {
- return err
- }
- return jsonutil.ConvertSliceNumbers(*v, 0)
- case *interface{}:
- if err := yaml.Unmarshal(data, v, preserveIntFloat); err != nil {
- return err
- }
- return jsonutil.ConvertInterfaceNumbers(v, 0)
- default:
- return yaml.Unmarshal(data, v)
- }
-}
-
-// ToJSON converts a single YAML document into a JSON document
-// or returns an error. If the document appears to be JSON the
-// YAML decoding path is not used (so that error messages are
-// JSON specific).
-func ToJSON(data []byte) ([]byte, error) {
- if hasJSONPrefix(data) {
- return data, nil
- }
- return yaml.YAMLToJSON(data)
-}
-
-// YAMLToJSONDecoder decodes YAML documents from an io.Reader by
-// separating individual documents. It first converts the YAML
-// body to JSON, then unmarshals the JSON.
-type YAMLToJSONDecoder struct {
- reader Reader
-}
-
-// NewYAMLToJSONDecoder decodes YAML documents from the provided
-// stream in chunks by converting each document (as defined by
-// the YAML spec) into its own chunk, converting it to JSON via
-// yaml.YAMLToJSON, and then passing it to json.Decoder.
-func NewYAMLToJSONDecoder(r io.Reader) *YAMLToJSONDecoder {
- reader := bufio.NewReader(r)
- return &YAMLToJSONDecoder{
- reader: NewYAMLReader(reader),
- }
-}
-
-// Decode reads a YAML document as JSON from the stream or returns
-// an error. The decoding rules match json.Unmarshal, not
-// yaml.Unmarshal.
-func (d *YAMLToJSONDecoder) Decode(into interface{}) error {
- bytes, err := d.reader.Read()
- if err != nil && err != io.EOF {
- return err
- }
-
- if len(bytes) != 0 {
- err := yaml.Unmarshal(bytes, into)
- if err != nil {
- return YAMLSyntaxError{err}
- }
- }
- return err
-}
-
-// YAMLDecoder reads chunks of objects and returns ErrShortBuffer if
-// the data is not sufficient.
-type YAMLDecoder struct {
- r io.ReadCloser
- scanner *bufio.Scanner
- remaining []byte
-}
-
-// NewDocumentDecoder decodes YAML documents from the provided
-// stream in chunks by converting each document (as defined by
-// the YAML spec) into its own chunk. io.ErrShortBuffer will be
-// returned if the entire buffer could not be read to assist
-// the caller in framing the chunk.
-func NewDocumentDecoder(r io.ReadCloser) io.ReadCloser {
- scanner := bufio.NewScanner(r)
- // initial buffer allocation: 4 KiB
- buf := make([]byte, 4*1024)
- // maximum size used to buffer a single token: 5 MiB
- scanner.Buffer(buf, 5*1024*1024)
- scanner.Split(splitYAMLDocument)
- return &YAMLDecoder{
- r: r,
- scanner: scanner,
- }
-}
-
-// Read reads the previous slice into the buffer, or attempts to read
-// the next chunk.
-// TODO: switch to readline approach.
-func (d *YAMLDecoder) Read(data []byte) (n int, err error) {
- left := len(d.remaining)
- if left == 0 {
- // return the next chunk from the stream
- if !d.scanner.Scan() {
- err := d.scanner.Err()
- if err == nil {
- err = io.EOF
- }
- return 0, err
- }
- out := d.scanner.Bytes()
- d.remaining = out
- left = len(out)
- }
-
- // fits within data
- if left <= len(data) {
- copy(data, d.remaining)
- d.remaining = nil
- return left, nil
- }
-
- // caller will need to reread
- copy(data, d.remaining[:len(data)])
- d.remaining = d.remaining[len(data):]
- return len(data), io.ErrShortBuffer
-}
-
-func (d *YAMLDecoder) Close() error {
- return d.r.Close()
-}
-
-const yamlSeparator = "\n---"
-const separator = "---"
-
-// splitYAMLDocument is a bufio.SplitFunc for splitting YAML streams into individual documents.
-func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err error) {
- if atEOF && len(data) == 0 {
- return 0, nil, nil
- }
- sep := len([]byte(yamlSeparator))
- if i := bytes.Index(data, []byte(yamlSeparator)); i >= 0 {
- // We have a potential document terminator
- i += sep
- after := data[i:]
- if len(after) == 0 {
- // we can't read any more characters
- if atEOF {
- return len(data), data[:len(data)-sep], nil
- }
- return 0, nil, nil
- }
- if j := bytes.IndexByte(after, '\n'); j >= 0 {
- return i + j + 1, data[0 : i-sep], nil
- }
- return 0, nil, nil
- }
- // If we're at EOF, we have a final, non-terminated line. Return it.
- if atEOF {
- return len(data), data, nil
- }
- // Request more data.
- return 0, nil, nil
-}
-
-// decoder is a convenience interface for Decode.
-type decoder interface {
- Decode(into interface{}) error
-}
-
-// YAMLOrJSONDecoder attempts to decode a stream of JSON documents or
-// YAML documents by sniffing for a leading { character.
-type YAMLOrJSONDecoder struct {
- r io.Reader
- bufferSize int
-
- decoder decoder
- rawData []byte
-}
-
-type JSONSyntaxError struct {
- Line int
- Err error
-}
-
-func (e JSONSyntaxError) Error() string {
- return fmt.Sprintf("json: line %d: %s", e.Line, e.Err.Error())
-}
-
-type YAMLSyntaxError struct {
- err error
-}
-
-func (e YAMLSyntaxError) Error() string {
- return e.err.Error()
-}
-
-// NewYAMLOrJSONDecoder returns a decoder that will process YAML documents
-// or JSON documents from the given reader as a stream. bufferSize determines
-// how far into the stream the decoder will look to figure out whether this
-// is a JSON stream (has whitespace followed by an open brace).
-func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder {
- return &YAMLOrJSONDecoder{
- r: r,
- bufferSize: bufferSize,
- }
-}
-
-// Decode unmarshals the next object from the underlying stream into the
-// provided object, or returns an error.
-func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
- if d.decoder == nil {
- buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize)
- if isJSON {
- d.decoder = json.NewDecoder(buffer)
- d.rawData = origData
- } else {
- d.decoder = NewYAMLToJSONDecoder(buffer)
- }
- }
- err := d.decoder.Decode(into)
- if jsonDecoder, ok := d.decoder.(*json.Decoder); ok {
- if syntax, ok := err.(*json.SyntaxError); ok {
- data, readErr := ioutil.ReadAll(jsonDecoder.Buffered())
- if readErr != nil {
- klog.V(4).Infof("reading stream failed: %v", readErr)
- }
- js := string(data)
-
- // if contents from io.Reader are not complete,
- // use the original raw data to prevent panic
- if int64(len(js)) <= syntax.Offset {
- js = string(d.rawData)
- }
-
- start := strings.LastIndex(js[:syntax.Offset], "\n") + 1
- line := strings.Count(js[:start], "\n")
- return JSONSyntaxError{
- Line: line,
- Err: fmt.Errorf(syntax.Error()),
- }
- }
- }
- return err
-}
-
-type Reader interface {
- Read() ([]byte, error)
-}
-
-type YAMLReader struct {
- reader Reader
-}
-
-func NewYAMLReader(r *bufio.Reader) *YAMLReader {
- return &YAMLReader{
- reader: &LineReader{reader: r},
- }
-}
-
-// Read returns a full YAML document.
-func (r *YAMLReader) Read() ([]byte, error) {
- var buffer bytes.Buffer
- for {
- line, err := r.reader.Read()
- if err != nil && err != io.EOF {
- return nil, err
- }
-
- sep := len([]byte(separator))
- if i := bytes.Index(line, []byte(separator)); i == 0 {
- // We have a potential document terminator
- i += sep
- after := line[i:]
- if len(strings.TrimRightFunc(string(after), unicode.IsSpace)) == 0 {
- if buffer.Len() != 0 {
- return buffer.Bytes(), nil
- }
- if err == io.EOF {
- return nil, err
- }
- }
- }
- if err == io.EOF {
- if buffer.Len() != 0 {
- // If we're at EOF, we have a final, non-terminated line. Return it.
- return buffer.Bytes(), nil
- }
- return nil, err
- }
- buffer.Write(line)
- }
-}
-
-type LineReader struct {
- reader *bufio.Reader
-}
-
-// Read returns a single line (terminated with '\n') from the underlying reader.
-// An error is returned iff there is an error with the underlying reader.
-func (r *LineReader) Read() ([]byte, error) {
- var (
- isPrefix bool = true
- err error = nil
- line []byte
- buffer bytes.Buffer
- )
-
- for isPrefix && err == nil {
- line, isPrefix, err = r.reader.ReadLine()
- buffer.Write(line)
- }
- buffer.WriteByte('\n')
- return buffer.Bytes(), err
-}
-
-// GuessJSONStream scans the provided reader up to size, looking
-// for an open brace indicating this is JSON. It will return the
-// bufio.Reader it creates for the consumer.
-func GuessJSONStream(r io.Reader, size int) (io.Reader, []byte, bool) {
- buffer := bufio.NewReaderSize(r, size)
- b, _ := buffer.Peek(size)
- return buffer, b, hasJSONPrefix(b)
-}
-
-var jsonPrefix = []byte("{")
-
-// hasJSONPrefix returns true if the provided buffer appears to start with
-// a JSON open brace.
-func hasJSONPrefix(buf []byte) bool {
- return hasPrefix(buf, jsonPrefix)
-}
-
-// Return true if the first non-whitespace bytes in buf is
-// prefix.
-func hasPrefix(buf []byte, prefix []byte) bool {
- trim := bytes.TrimLeftFunc(buf, unicode.IsSpace)
- return bytes.HasPrefix(trim, prefix)
-}
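
A brief sketch (not from the diff) of how the removed YAMLOrJSONDecoder behaves: it peeks at the stream, decodes it as JSON when the first non-space byte is '{', and otherwise splits the input on "---" separators and converts each YAML document.

package main

import (
	"fmt"
	"io"
	"strings"

	"k8s.io/apimachinery/pkg/util/yaml"
)

func main() {
	stream := strings.NewReader("name: first\n---\nname: second\n")

	// 4096 is the sniff window used to decide between the JSON and YAML paths.
	d := yaml.NewYAMLOrJSONDecoder(stream, 4096)
	for {
		var obj map[string]interface{}
		if err := d.Decode(&obj); err != nil {
			if err == io.EOF {
				break
			}
			panic(err)
		}
		fmt.Println(obj["name"]) // first, then second
	}
}
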