-rw-r--r--  cmd/podman/networks/list.go | 67
-rw-r--r--  cmd/podman/networks/network.go | 3
-rw-r--r--  docs/source/markdown/podman-system-connection.1.md | 2
-rw-r--r--  go.mod | 6
-rw-r--r--  go.sum | 12
-rw-r--r--  libpod/container_api.go | 7
-rw-r--r--  libpod/container_exec.go | 5
-rw-r--r--  libpod/container_inspect.go | 3
-rw-r--r--  libpod/container_internal_linux.go | 12
-rw-r--r--  libpod/define/terminal.go | 7
-rw-r--r--  libpod/oci.go | 5
-rw-r--r--  libpod/oci_attach_linux.go | 7
-rw-r--r--  libpod/oci_attach_unsupported.go | 5
-rw-r--r--  libpod/oci_conmon_exec_linux.go | 3
-rw-r--r--  libpod/oci_conmon_linux.go | 3
-rw-r--r--  libpod/oci_missing.go | 5
-rw-r--r--  pkg/api/handlers/compat/resize.go | 3
-rw-r--r--  pkg/domain/infra/abi/terminal/terminal.go | 10
-rw-r--r--  pkg/domain/infra/abi/terminal/terminal_linux.go | 5
-rw-r--r--  pkg/kubeutils/resize.go | 4
-rw-r--r--  test/e2e/inspect_test.go | 18
-rw-r--r--  test/e2e/network_test.go | 7
-rw-r--r--  test/e2e/run_networking_test.go | 14
-rw-r--r--  test/e2e/run_test.go | 5
-rw-r--r--  vendor/github.com/containers/common/pkg/config/containers.conf | 6
-rw-r--r--  vendor/github.com/containers/common/pkg/config/default.go | 2
-rw-r--r--  vendor/github.com/containers/common/pkg/report/template.go | 16
-rw-r--r--  vendor/github.com/containers/common/version/version.go | 2
-rw-r--r--  vendor/github.com/docker/spdystream/CONTRIBUTING.md | 13
-rw-r--r--  vendor/github.com/docker/spdystream/LICENSE | 191
-rw-r--r--  vendor/github.com/docker/spdystream/LICENSE.docs | 425
-rw-r--r--  vendor/github.com/docker/spdystream/MAINTAINERS | 28
-rw-r--r--  vendor/github.com/docker/spdystream/README.md | 77
-rw-r--r--  vendor/github.com/docker/spdystream/connection.go | 958
-rw-r--r--  vendor/github.com/docker/spdystream/handlers.go | 38
-rw-r--r--  vendor/github.com/docker/spdystream/priority.go | 98
-rw-r--r--  vendor/github.com/docker/spdystream/spdy/dictionary.go | 187
-rw-r--r--  vendor/github.com/docker/spdystream/spdy/read.go | 348
-rw-r--r--  vendor/github.com/docker/spdystream/spdy/types.go | 275
-rw-r--r--  vendor/github.com/docker/spdystream/spdy/write.go | 318
-rw-r--r--  vendor/github.com/docker/spdystream/stream.go | 327
-rw-r--r--  vendor/github.com/docker/spdystream/utils.go | 16
-rw-r--r--  vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go | 71
-rw-r--r--  vendor/golang.org/x/oauth2/.travis.yml | 13
-rw-r--r--  vendor/golang.org/x/oauth2/AUTHORS | 3
-rw-r--r--  vendor/golang.org/x/oauth2/CONTRIBUTING.md | 26
-rw-r--r--  vendor/golang.org/x/oauth2/CONTRIBUTORS | 3
-rw-r--r--  vendor/golang.org/x/oauth2/LICENSE | 27
-rw-r--r--  vendor/golang.org/x/oauth2/README.md | 36
-rw-r--r--  vendor/golang.org/x/oauth2/go.mod | 10
-rw-r--r--  vendor/golang.org/x/oauth2/go.sum | 12
-rw-r--r--  vendor/golang.org/x/oauth2/internal/client_appengine.go | 13
-rw-r--r--  vendor/golang.org/x/oauth2/internal/doc.go | 6
-rw-r--r--  vendor/golang.org/x/oauth2/internal/oauth2.go | 37
-rw-r--r--  vendor/golang.org/x/oauth2/internal/token.go | 294
-rw-r--r--  vendor/golang.org/x/oauth2/internal/transport.go | 33
-rw-r--r--  vendor/golang.org/x/oauth2/oauth2.go | 381
-rw-r--r--  vendor/golang.org/x/oauth2/token.go | 178
-rw-r--r--  vendor/golang.org/x/oauth2/transport.go | 89
-rw-r--r--  vendor/golang.org/x/time/AUTHORS | 3
-rw-r--r--  vendor/golang.org/x/time/CONTRIBUTORS | 3
-rw-r--r--  vendor/golang.org/x/time/LICENSE | 27
-rw-r--r--  vendor/golang.org/x/time/PATENTS | 22
-rw-r--r--  vendor/golang.org/x/time/rate/rate.go | 400
-rw-r--r--  vendor/google.golang.org/appengine/LICENSE | 202
-rw-r--r--  vendor/google.golang.org/appengine/internal/api.go | 678
-rw-r--r--  vendor/google.golang.org/appengine/internal/api_classic.go | 169
-rw-r--r--  vendor/google.golang.org/appengine/internal/api_common.go | 123
-rw-r--r--  vendor/google.golang.org/appengine/internal/app_id.go | 28
-rw-r--r--  vendor/google.golang.org/appengine/internal/base/api_base.pb.go | 308
-rw-r--r--  vendor/google.golang.org/appengine/internal/base/api_base.proto | 33
-rw-r--r--  vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go | 4367
-rw-r--r--  vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto | 551
-rw-r--r--  vendor/google.golang.org/appengine/internal/identity.go | 55
-rw-r--r--  vendor/google.golang.org/appengine/internal/identity_classic.go | 61
-rw-r--r--  vendor/google.golang.org/appengine/internal/identity_flex.go | 11
-rw-r--r--  vendor/google.golang.org/appengine/internal/identity_vm.go | 134
-rw-r--r--  vendor/google.golang.org/appengine/internal/internal.go | 110
-rw-r--r--  vendor/google.golang.org/appengine/internal/log/log_service.pb.go | 1313
-rw-r--r--  vendor/google.golang.org/appengine/internal/log/log_service.proto | 150
-rw-r--r--  vendor/google.golang.org/appengine/internal/main.go | 16
-rw-r--r--  vendor/google.golang.org/appengine/internal/main_common.go | 7
-rw-r--r--  vendor/google.golang.org/appengine/internal/main_vm.go | 69
-rw-r--r--  vendor/google.golang.org/appengine/internal/metadata.go | 60
-rw-r--r--  vendor/google.golang.org/appengine/internal/net.go | 56
-rw-r--r--  vendor/google.golang.org/appengine/internal/regen.sh | 40
-rw-r--r--  vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go | 361
-rw-r--r--  vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto | 44
-rw-r--r--  vendor/google.golang.org/appengine/internal/transaction.go | 115
-rw-r--r--  vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go | 527
-rw-r--r--  vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto | 64
-rw-r--r--  vendor/google.golang.org/appengine/urlfetch/urlfetch.go | 210
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS | 23
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/api/errors/doc.go | 18
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/api/errors/errors.go | 697
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go | 500
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go | 496
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go | 210
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go | 55
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go | 322
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go | 389
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go | 63
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go | 43
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go | 18
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go | 476
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go | 127
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go | 137
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go | 250
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/clock/clock.go | 407
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/framer/framer.go | 170
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go | 19
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go | 157
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go | 187
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go | 369
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go | 120
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go | 53
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go | 379
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/version/doc.go | 20
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/version/helpers.go | 88
-rw-r--r--  vendor/k8s.io/apimachinery/pkg/version/types.go | 37
-rw-r--r--  vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go | 27
-rw-r--r--  vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS | 9
-rw-r--r--  vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go | 20
-rw-r--r--  vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go | 50
-rw-r--r--  vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go | 77
-rw-r--r--  vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go | 24
-rw-r--r--  vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go | 55
-rw-r--r--  vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go | 78
-rw-r--r--  vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go | 176
-rw-r--r--  vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go | 128
-rw-r--r--  vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go | 32
-rw-r--r--  vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go | 26
-rw-r--r--  vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go | 24
-rw-r--r--  vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go | 55
-rw-r--r--  vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go | 59
-rw-r--r--  vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go | 142
-rw-r--r--  vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go | 92
-rw-r--r--  vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go | 32
-rw-r--r--  vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go | 128
-rw-r--r--  vendor/k8s.io/client-go/pkg/version/.gitattributes | 1
-rw-r--r--  vendor/k8s.io/client-go/pkg/version/base.go | 63
-rw-r--r--  vendor/k8s.io/client-go/pkg/version/def.bzl | 38
-rw-r--r--  vendor/k8s.io/client-go/pkg/version/doc.go | 21
-rw-r--r--  vendor/k8s.io/client-go/pkg/version/version.go | 42
-rw-r--r--  vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go | 360
-rw-r--r--  vendor/k8s.io/client-go/rest/OWNERS | 26
-rw-r--r--  vendor/k8s.io/client-go/rest/client.go | 258
-rw-r--r--  vendor/k8s.io/client-go/rest/config.go | 549
-rw-r--r--  vendor/k8s.io/client-go/rest/plugin.go | 73
-rw-r--r--  vendor/k8s.io/client-go/rest/request.go | 1206
-rw-r--r--  vendor/k8s.io/client-go/rest/transport.go | 118
-rw-r--r--  vendor/k8s.io/client-go/rest/url_utils.go | 97
-rw-r--r--  vendor/k8s.io/client-go/rest/urlbackoff.go | 107
-rw-r--r--  vendor/k8s.io/client-go/rest/watch/decoder.go | 72
-rw-r--r--  vendor/k8s.io/client-go/rest/watch/encoder.go | 56
-rw-r--r--  vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go | 52
-rw-r--r--  vendor/k8s.io/client-go/tools/clientcmd/api/doc.go | 19
-rw-r--r--  vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go | 188
-rw-r--r--  vendor/k8s.io/client-go/tools/clientcmd/api/register.go | 46
-rw-r--r--  vendor/k8s.io/client-go/tools/clientcmd/api/types.go | 262
-rw-r--r--  vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go | 324
-rw-r--r--  vendor/k8s.io/client-go/tools/metrics/OWNERS | 9
-rw-r--r--  vendor/k8s.io/client-go/tools/metrics/metrics.go | 61
-rw-r--r--  vendor/k8s.io/client-go/tools/remotecommand/doc.go | 20
-rw-r--r--  vendor/k8s.io/client-go/tools/remotecommand/errorstream.go | 55
-rw-r--r--  vendor/k8s.io/client-go/tools/remotecommand/reader.go | 41
-rw-r--r--  vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go | 142
-rw-r--r--  vendor/k8s.io/client-go/tools/remotecommand/resize.go | 33
-rw-r--r--  vendor/k8s.io/client-go/tools/remotecommand/v1.go | 160
-rw-r--r--  vendor/k8s.io/client-go/tools/remotecommand/v2.go | 195
-rw-r--r--  vendor/k8s.io/client-go/tools/remotecommand/v3.go | 111
-rw-r--r--  vendor/k8s.io/client-go/tools/remotecommand/v4.go | 119
-rw-r--r--  vendor/k8s.io/client-go/transport/OWNERS | 9
-rw-r--r--  vendor/k8s.io/client-go/transport/cache.go | 117
-rw-r--r--  vendor/k8s.io/client-go/transport/config.go | 126
-rw-r--r--  vendor/k8s.io/client-go/transport/round_trippers.go | 564
-rw-r--r--  vendor/k8s.io/client-go/transport/spdy/spdy.go | 94
-rw-r--r--  vendor/k8s.io/client-go/transport/token_source.go | 149
-rw-r--r--  vendor/k8s.io/client-go/transport/transport.go | 227
-rw-r--r--  vendor/k8s.io/client-go/util/cert/OWNERS | 9
-rw-r--r--  vendor/k8s.io/client-go/util/cert/cert.go | 206
-rw-r--r--  vendor/k8s.io/client-go/util/cert/csr.go | 75
-rw-r--r--  vendor/k8s.io/client-go/util/cert/io.go | 98
-rw-r--r--  vendor/k8s.io/client-go/util/cert/pem.go | 61
-rw-r--r--  vendor/k8s.io/client-go/util/connrotation/connrotation.go | 105
-rw-r--r--  vendor/k8s.io/client-go/util/exec/exec.go | 52
-rw-r--r--  vendor/k8s.io/client-go/util/flowcontrol/backoff.go | 149
-rw-r--r--  vendor/k8s.io/client-go/util/flowcontrol/throttle.go | 143
-rw-r--r--  vendor/k8s.io/client-go/util/keyutil/OWNERS | 7
-rw-r--r--  vendor/k8s.io/client-go/util/keyutil/key.go | 323
-rw-r--r--  vendor/k8s.io/klog/.travis.yml | 16
-rw-r--r--  vendor/k8s.io/klog/CONTRIBUTING.md | 22
-rw-r--r--  vendor/k8s.io/klog/LICENSE | 191
-rw-r--r--  vendor/k8s.io/klog/OWNERS | 19
-rw-r--r--  vendor/k8s.io/klog/README.md | 97
-rw-r--r--  vendor/k8s.io/klog/RELEASE.md | 9
-rw-r--r--  vendor/k8s.io/klog/SECURITY_CONTACTS | 20
-rw-r--r--  vendor/k8s.io/klog/code-of-conduct.md | 3
-rw-r--r--  vendor/k8s.io/klog/go.mod | 5
-rw-r--r--  vendor/k8s.io/klog/go.sum | 2
-rw-r--r--  vendor/k8s.io/klog/klog.go | 1308
-rw-r--r--  vendor/k8s.io/klog/klog_file.go | 139
-rw-r--r--  vendor/k8s.io/utils/LICENSE | 202
-rw-r--r--  vendor/k8s.io/utils/integer/integer.go | 73
-rw-r--r--  vendor/modules.txt | 58
-rw-r--r--  vendor/sigs.k8s.io/yaml/.gitignore | 20
-rw-r--r--  vendor/sigs.k8s.io/yaml/.travis.yml | 13
-rw-r--r--  vendor/sigs.k8s.io/yaml/CONTRIBUTING.md | 31
-rw-r--r--  vendor/sigs.k8s.io/yaml/LICENSE | 50
-rw-r--r--  vendor/sigs.k8s.io/yaml/OWNERS | 27
-rw-r--r--  vendor/sigs.k8s.io/yaml/README.md | 123
-rw-r--r--  vendor/sigs.k8s.io/yaml/RELEASE.md | 9
-rw-r--r--  vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS | 17
-rw-r--r--  vendor/sigs.k8s.io/yaml/code-of-conduct.md | 3
-rw-r--r--  vendor/sigs.k8s.io/yaml/fields.go | 502
-rw-r--r--  vendor/sigs.k8s.io/yaml/go.mod | 8
-rw-r--r--  vendor/sigs.k8s.io/yaml/go.sum | 9
-rw-r--r--  vendor/sigs.k8s.io/yaml/yaml.go | 380
-rw-r--r--  vendor/sigs.k8s.io/yaml/yaml_go110.go | 14
219 files changed, 164 insertions, 32800 deletions
diff --git a/cmd/podman/networks/list.go b/cmd/podman/networks/list.go
index 6d8d35589..2181f850b 100644
--- a/cmd/podman/networks/list.go
+++ b/cmd/podman/networks/list.go
@@ -1,7 +1,6 @@
package network
import (
- "encoding/json"
"fmt"
"os"
"strings"
@@ -11,7 +10,6 @@ import (
"github.com/containers/common/pkg/completion"
"github.com/containers/common/pkg/report"
"github.com/containers/podman/v3/cmd/podman/common"
- "github.com/containers/podman/v3/cmd/podman/parse"
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/validate"
"github.com/containers/podman/v3/libpod/network"
@@ -78,13 +76,38 @@ func networkList(cmd *cobra.Command, args []string) error {
}
switch {
- case report.IsJSON(networkListOptions.Format):
- return jsonOut(responses)
+ // quiet means we only print the network names
case networkListOptions.Quiet:
- // quiet means we only print the network names
- return quietOut(responses)
+ quietOut(responses)
+
+ // JSON output formatting
+ case report.IsJSON(networkListOptions.Format):
+ err = jsonOut(responses)
+
+ // table or other format output
+ default:
+ err = templateOut(responses, cmd)
+ }
+
+ return err
+}
+
+func quietOut(responses []*entities.NetworkListReport) {
+ for _, r := range responses {
+ fmt.Println(r.Name)
+ }
+}
+
+func jsonOut(responses []*entities.NetworkListReport) error {
+ prettyJSON, err := json.MarshalIndent(responses, "", " ")
+ if err != nil {
+ return err
}
+ fmt.Println(string(prettyJSON))
+ return nil
+}
+func templateOut(responses []*entities.NetworkListReport, cmd *cobra.Command) error {
nlprs := make([]ListPrintReports, 0, len(responses))
for _, r := range responses {
nlprs = append(nlprs, ListPrintReports{r})
@@ -99,13 +122,16 @@ func networkList(cmd *cobra.Command, args []string) error {
"Labels": "labels",
"ID": "network id",
})
- renderHeaders := true
- row := "{{.Name}}\t{{.Version}}\t{{.Plugins}}\n"
+
+ renderHeaders := report.HasTable(networkListOptions.Format)
+ var row, format string
if cmd.Flags().Changed("format") {
- renderHeaders = parse.HasTable(networkListOptions.Format)
row = report.NormalizeFormat(networkListOptions.Format)
+ } else { // 'podman network ls' equivalent to 'podman network ls --format="table {{.ID}} {{.Name}} {{.Version}} {{.Plugins}}" '
+ renderHeaders = true
+ row = "{{.ID}}\t{{.Name}}\t{{.Version}}\t{{.Plugins}}\n"
}
- format := parse.EnforceRange(row)
+ format = report.EnforceRange(row)
tmpl, err := template.New("listNetworks").Parse(format)
if err != nil {
@@ -122,34 +148,22 @@ func networkList(cmd *cobra.Command, args []string) error {
return tmpl.Execute(w, nlprs)
}
-func quietOut(responses []*entities.NetworkListReport) error {
- for _, r := range responses {
- fmt.Println(r.Name)
- }
- return nil
-}
-
-func jsonOut(responses []*entities.NetworkListReport) error {
- b, err := json.MarshalIndent(responses, "", " ")
- if err != nil {
- return err
- }
- fmt.Println(string(b))
- return nil
-}
-
+// ListPrintReports returns the network list report
type ListPrintReports struct {
*entities.NetworkListReport
}
+// Version returns the CNI version
func (n ListPrintReports) Version() string {
return n.CNIVersion
}
+// Plugins returns the CNI Plugins
func (n ListPrintReports) Plugins() string {
return network.GetCNIPlugins(n.NetworkConfigList)
}
+// Labels returns any labels added to a Network
func (n ListPrintReports) Labels() string {
list := make([]string, 0, len(n.NetworkListReport.Labels))
for k, v := range n.NetworkListReport.Labels {
@@ -158,6 +172,7 @@ func (n ListPrintReports) Labels() string {
return strings.Join(list, ",")
}
+// ID returns the Podman Network ID
func (n ListPrintReports) ID() string {
length := 12
if noTrunc {
diff --git a/cmd/podman/networks/network.go b/cmd/podman/networks/network.go
index e729f35f3..4d6cd8abd 100644
--- a/cmd/podman/networks/network.go
+++ b/cmd/podman/networks/network.go
@@ -8,6 +8,9 @@ import (
)
var (
+ // Pull in configured json library
+ json = registry.JSONLibrary()
+
// Command: podman _network_
networkCmd = &cobra.Command{
Use: "network",
diff --git a/docs/source/markdown/podman-system-connection.1.md b/docs/source/markdown/podman-system-connection.1.md
index 0673aaee1..6cd4a5fa8 100644
--- a/docs/source/markdown/podman-system-connection.1.md
+++ b/docs/source/markdown/podman-system-connection.1.md
@@ -3,7 +3,7 @@
## NAME
podman\-system\-connection - Manage the destination(s) for Podman service(s)
-## SYNOPSISManage the destination(s) for Podman service(s)
+## SYNOPSIS
**podman system connection** *subcommand*
## DESCRIPTION
diff --git a/go.mod b/go.mod
index 8a7f8f4ba..a185dcd89 100644
--- a/go.mod
+++ b/go.mod
@@ -11,7 +11,7 @@ require (
github.com/containernetworking/cni v0.8.1
github.com/containernetworking/plugins v0.9.0
github.com/containers/buildah v1.19.6
- github.com/containers/common v0.34.3-0.20210208115708-8668c76dd577
+ github.com/containers/common v0.35.0
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.10.2
github.com/containers/ocicrypt v1.1.0
@@ -39,6 +39,7 @@ require (
github.com/kr/text v0.2.0 // indirect
github.com/moby/term v0.0.0-20201110203204-bea5bbe245bf
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618
+ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/onsi/ginkgo v1.15.0
github.com/onsi/gomega v1.10.5
github.com/opencontainers/go-digest v1.0.0
@@ -62,10 +63,9 @@ require (
go.etcd.io/bbolt v1.3.5
go.uber.org/atomic v1.7.0 // indirect
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
- golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4
- google.golang.org/appengine v1.6.6 // indirect
+ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect
k8s.io/api v0.0.0-20190620084959-7cf5895f2711
k8s.io/apimachinery v0.20.4
diff --git a/go.sum b/go.sum
index ebe85afe1..3073df41f 100644
--- a/go.sum
+++ b/go.sum
@@ -99,8 +99,8 @@ github.com/containernetworking/plugins v0.9.0/go.mod h1:dbWv4dI0QrBGuVgj+TuVQ6wJ
github.com/containers/buildah v1.19.6 h1:8mPysB7QzHxX9okR+Bwq/lsKAZA/FjDcqB+vebgwI1g=
github.com/containers/buildah v1.19.6/go.mod h1:VnyHWgNmfR1d89/zJ/F4cbwOzaQS+6sBky46W7dCo3E=
github.com/containers/common v0.33.4/go.mod h1:PhgL71XuC4jJ/1BIqeP7doke3aMFkCP90YBXwDeUr9g=
-github.com/containers/common v0.34.3-0.20210208115708-8668c76dd577 h1:tUJcLouJ1bC3w9gdqgKqZBsj2uCuM8D8jSR592lxbhE=
-github.com/containers/common v0.34.3-0.20210208115708-8668c76dd577/go.mod h1:mwZ9H8sK4+dtWxsnVLyWcjxK/gEQClrLsXsqLvbEKbI=
+github.com/containers/common v0.35.0 h1:1OLZ2v+Tj/CN9BTQkKZ5VOriOiArJedinMMqfJRUI38=
+github.com/containers/common v0.35.0/go.mod h1:gs1th7XFTOvVUl4LDPdQjOfOeNiVRDbQ7CNrZ0wS6F8=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.10.1 h1:tHhGQ8RCMxJfJLD/PEW1qrOKX8nndledW9qz6UiAxns=
@@ -430,6 +430,8 @@ github.com/mtrmac/gpgme v0.1.2/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgi
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
@@ -705,8 +707,6 @@ golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAG
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -826,8 +826,6 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -867,6 +865,8 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
diff --git a/libpod/container_api.go b/libpod/container_api.go
index ec5bd08d2..2818ac841 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -14,7 +14,6 @@ import (
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "k8s.io/client-go/tools/remotecommand"
)
// Init creates a container in the OCI runtime, moving a container from
@@ -110,7 +109,7 @@ func (c *Container) Start(ctx context.Context, recursive bool) error {
// Attach call occurs before Start).
// In overall functionality, it is identical to the Start call, with the added
// side effect that an attach session will also be started.
-func (c *Container) StartAndAttach(ctx context.Context, streams *define.AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, recursive bool) (<-chan error, error) {
+func (c *Container) StartAndAttach(ctx context.Context, streams *define.AttachStreams, keys string, resize <-chan define.TerminalSize, recursive bool) (<-chan error, error) {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -236,7 +235,7 @@ func (c *Container) Kill(signal uint) error {
// Attach attaches to a container.
// This function returns when the attach finishes. It does not hold the lock for
// the duration of its runtime, only using it at the beginning to verify state.
-func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <-chan remotecommand.TerminalSize) error {
+func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <-chan define.TerminalSize) error {
if !c.batched {
c.lock.Lock()
if err := c.syncContainer(); err != nil {
@@ -319,7 +318,7 @@ func (c *Container) HTTPAttach(r *http.Request, w http.ResponseWriter, streams *
// AttachResize resizes the container's terminal, which is displayed by Attach
// and HTTPAttach.
-func (c *Container) AttachResize(newSize remotecommand.TerminalSize) error {
+func (c *Container) AttachResize(newSize define.TerminalSize) error {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
diff --git a/libpod/container_exec.go b/libpod/container_exec.go
index 8d63ef90f..bb43287d9 100644
--- a/libpod/container_exec.go
+++ b/libpod/container_exec.go
@@ -13,7 +13,6 @@ import (
"github.com/containers/storage/pkg/stringid"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "k8s.io/client-go/tools/remotecommand"
)
// ExecConfig contains the configuration of an exec session
@@ -676,7 +675,7 @@ func (c *Container) ExecRemove(sessionID string, force bool) error {
// ExecResize resizes the TTY of the given exec session. Only available if the
// exec session created a TTY.
-func (c *Container) ExecResize(sessionID string, newSize remotecommand.TerminalSize) error {
+func (c *Container) ExecResize(sessionID string, newSize define.TerminalSize) error {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -703,7 +702,7 @@ func (c *Container) ExecResize(sessionID string, newSize remotecommand.TerminalS
// Exec emulates the old Libpod exec API, providing a single call to create,
// run, and remove an exec session. Returns exit code and error. Exit code is
// not guaranteed to be set sanely if error is not nil.
-func (c *Container) Exec(config *ExecConfig, streams *define.AttachStreams, resize <-chan remotecommand.TerminalSize) (int, error) {
+func (c *Container) Exec(config *ExecConfig, streams *define.AttachStreams, resize <-chan define.TerminalSize) (int, error) {
sessionID, err := c.ExecCreate(config)
if err != nil {
return -1, err
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 399eff845..e0569e2d4 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -2,6 +2,7 @@ package libpod
import (
"fmt"
+ "sort"
"strings"
"github.com/containers/common/pkg/config"
@@ -698,6 +699,8 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named
for cap := range boundingCaps {
capDrop = append(capDrop, cap)
}
+ // Sort CapDrop so it displays in consistent order (GH #9490)
+ sort.Strings(capDrop)
}
hostConfig.CapAdd = capAdd
hostConfig.CapDrop = capDrop
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 9c3e91a97..dc0418148 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -1713,8 +1713,9 @@ rootless=%d
// generateResolvConf generates a containers resolv.conf
func (c *Container) generateResolvConf() (string, error) {
var (
- nameservers []string
- cniNameServers []string
+ nameservers []string
+ cniNameServers []string
+ cniSearchDomains []string
)
resolvConf := "/etc/resolv.conf"
@@ -1766,6 +1767,10 @@ func (c *Container) generateResolvConf() (string, error) {
cniNameServers = append(cniNameServers, i.DNS.Nameservers...)
logrus.Debugf("adding nameserver(s) from cni response of '%q'", i.DNS.Nameservers)
}
+ if i.DNS.Search != nil {
+ cniSearchDomains = append(cniSearchDomains, i.DNS.Search...)
+ logrus.Debugf("adding search domain(s) from cni response of '%q'", i.DNS.Search)
+ }
}
dns := make([]net.IP, 0, len(c.runtime.config.Containers.DNSServers))
@@ -1797,10 +1802,11 @@ func (c *Container) generateResolvConf() (string, error) {
}
var search []string
- if len(c.config.DNSSearch) > 0 || len(c.runtime.config.Containers.DNSSearches) > 0 {
+ if len(c.config.DNSSearch) > 0 || len(c.runtime.config.Containers.DNSSearches) > 0 || len(cniSearchDomains) > 0 {
if !util.StringInSlice(".", c.config.DNSSearch) {
search = c.runtime.config.Containers.DNSSearches
search = append(search, c.config.DNSSearch...)
+ search = append(search, cniSearchDomains...)
}
} else {
search = resolvconf.GetSearchDomains(resolv.Content)
diff --git a/libpod/define/terminal.go b/libpod/define/terminal.go
new file mode 100644
index 000000000..ce8955544
--- /dev/null
+++ b/libpod/define/terminal.go
@@ -0,0 +1,7 @@
+package define
+
+// TerminalSize represents the width and height of a terminal.
+type TerminalSize struct {
+ Width uint16
+ Height uint16
+}
diff --git a/libpod/oci.go b/libpod/oci.go
index ec6b424ce..f2053f1b5 100644
--- a/libpod/oci.go
+++ b/libpod/oci.go
@@ -4,7 +4,6 @@ import (
"net/http"
"github.com/containers/podman/v3/libpod/define"
- "k8s.io/client-go/tools/remotecommand"
)
// OCIRuntime is an implementation of an OCI runtime.
@@ -64,7 +63,7 @@ type OCIRuntime interface {
// client.
HTTPAttach(ctr *Container, r *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool, hijackDone chan<- bool, streamAttach, streamLogs bool) error
// AttachResize resizes the terminal in use by the given container.
- AttachResize(ctr *Container, newSize remotecommand.TerminalSize) error
+ AttachResize(ctr *Container, newSize define.TerminalSize) error
// ExecContainer executes a command in a running container.
// Returns an int (PID of exec session), error channel (errors from
@@ -86,7 +85,7 @@ type OCIRuntime interface {
ExecContainerDetached(ctr *Container, sessionID string, options *ExecOptions, stdin bool) (int, error)
// ExecAttachResize resizes the terminal of a running exec session. Only
// allowed with sessions that were created with a TTY.
- ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error
+ ExecAttachResize(ctr *Container, sessionID string, newSize define.TerminalSize) error
// ExecStopContainer stops a given exec session in a running container.
// SIGTERM with be sent initially, then SIGKILL after the given timeout.
// If timeout is 0, SIGKILL will be sent immediately, and SIGTERM will
diff --git a/libpod/oci_attach_linux.go b/libpod/oci_attach_linux.go
index c3db0f9e0..b5040de3e 100644
--- a/libpod/oci_attach_linux.go
+++ b/libpod/oci_attach_linux.go
@@ -18,7 +18,6 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
- "k8s.io/client-go/tools/remotecommand"
)
/* Sync with stdpipe_t in conmon.c */
@@ -40,7 +39,7 @@ func openUnixSocket(path string) (*net.UnixConn, error) {
// Attach to the given container
// Does not check if state is appropriate
// started is only required if startContainer is true
-func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, started chan bool, attachRdy chan<- bool) error {
+func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-chan define.TerminalSize, startContainer bool, started chan bool, attachRdy chan<- bool) error {
if !streams.AttachOutput && !streams.AttachError && !streams.AttachInput {
return errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
}
@@ -172,8 +171,8 @@ func processDetachKeys(keys string) ([]byte, error) {
return detachKeys, nil
}
-func registerResizeFunc(resize <-chan remotecommand.TerminalSize, bundlePath string) {
- kubeutils.HandleResizing(resize, func(size remotecommand.TerminalSize) {
+func registerResizeFunc(resize <-chan define.TerminalSize, bundlePath string) {
+ kubeutils.HandleResizing(resize, func(size define.TerminalSize) {
controlPath := filepath.Join(bundlePath, "ctl")
controlFile, err := os.OpenFile(controlPath, unix.O_WRONLY, 0)
if err != nil {
diff --git a/libpod/oci_attach_unsupported.go b/libpod/oci_attach_unsupported.go
index b2184f993..85e8b32e6 100644
--- a/libpod/oci_attach_unsupported.go
+++ b/libpod/oci_attach_unsupported.go
@@ -6,13 +6,12 @@ import (
"os"
"github.com/containers/podman/v3/libpod/define"
- "k8s.io/client-go/tools/remotecommand"
)
-func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, startContainer bool, started chan bool, attachRdy chan<- bool) error {
+func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-chan define.TerminalSize, startContainer bool, started chan bool, attachRdy chan<- bool) error {
return define.ErrNotImplemented
}
-func (c *Container) attachToExec(streams *define.AttachStreams, keys string, resize <-chan remotecommand.TerminalSize, sessionID string, startFd *os.File, attachFd *os.File) error {
+func (c *Container) attachToExec(streams *define.AttachStreams, keys string, resize <-chan define.TerminalSize, sessionID string, startFd *os.File, attachFd *os.File) error {
return define.ErrNotImplemented
}
diff --git a/libpod/oci_conmon_exec_linux.go b/libpod/oci_conmon_exec_linux.go
index c5f42fe3e..173edba2b 100644
--- a/libpod/oci_conmon_exec_linux.go
+++ b/libpod/oci_conmon_exec_linux.go
@@ -17,7 +17,6 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
- "k8s.io/client-go/tools/remotecommand"
)
// ExecContainer executes a command in a running container
@@ -191,7 +190,7 @@ func (r *ConmonOCIRuntime) ExecContainerDetached(ctr *Container, sessionID strin
}
// ExecAttachResize resizes the TTY of the given exec session.
-func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error {
+func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize define.TerminalSize) error {
controlFile, err := openControlFile(ctr, ctr.execBundlePath(sessionID))
if err != nil {
return err
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index 47c628724..de7630c06 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -43,7 +43,6 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
- "k8s.io/client-go/tools/remotecommand"
)
const (
@@ -746,7 +745,7 @@ func openControlFile(ctr *Container, parentDir string) (*os.File, error) {
}
// AttachResize resizes the terminal used by the given container.
-func (r *ConmonOCIRuntime) AttachResize(ctr *Container, newSize remotecommand.TerminalSize) error {
+func (r *ConmonOCIRuntime) AttachResize(ctr *Container, newSize define.TerminalSize) error {
controlFile, err := openControlFile(ctr, ctr.bundlePath())
if err != nil {
return err
diff --git a/libpod/oci_missing.go b/libpod/oci_missing.go
index 0fd14ce52..eb8cdebad 100644
--- a/libpod/oci_missing.go
+++ b/libpod/oci_missing.go
@@ -9,7 +9,6 @@ import (
"github.com/containers/podman/v3/libpod/define"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "k8s.io/client-go/tools/remotecommand"
)
var (
@@ -115,7 +114,7 @@ func (r *MissingRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.Re
}
// AttachResize is not available as the runtime is missing
-func (r *MissingRuntime) AttachResize(ctr *Container, newSize remotecommand.TerminalSize) error {
+func (r *MissingRuntime) AttachResize(ctr *Container, newSize define.TerminalSize) error {
return r.printError()
}
@@ -135,7 +134,7 @@ func (r *MissingRuntime) ExecContainerDetached(ctr *Container, sessionID string,
}
// ExecAttachResize is not available as the runtime is missing.
-func (r *MissingRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize remotecommand.TerminalSize) error {
+func (r *MissingRuntime) ExecAttachResize(ctr *Container, sessionID string, newSize define.TerminalSize) error {
return r.printError()
}
diff --git a/pkg/api/handlers/compat/resize.go b/pkg/api/handlers/compat/resize.go
index b3c8b7ce4..1bf7ad460 100644
--- a/pkg/api/handlers/compat/resize.go
+++ b/pkg/api/handlers/compat/resize.go
@@ -11,7 +11,6 @@ import (
"github.com/gorilla/mux"
"github.com/gorilla/schema"
"github.com/pkg/errors"
- "k8s.io/client-go/tools/remotecommand"
)
func ResizeTTY(w http.ResponseWriter, r *http.Request) {
@@ -32,7 +31,7 @@ func ResizeTTY(w http.ResponseWriter, r *http.Request) {
return
}
- sz := remotecommand.TerminalSize{
+ sz := define.TerminalSize{
Width: query.Width,
Height: query.Height,
}
diff --git a/pkg/domain/infra/abi/terminal/terminal.go b/pkg/domain/infra/abi/terminal/terminal.go
index 26a692a7f..067c0c08e 100644
--- a/pkg/domain/infra/abi/terminal/terminal.go
+++ b/pkg/domain/infra/abi/terminal/terminal.go
@@ -5,11 +5,11 @@ import (
"os"
"os/signal"
+ "github.com/containers/podman/v3/libpod/define"
lsignal "github.com/containers/podman/v3/pkg/signal"
"github.com/moby/term"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
- "k8s.io/client-go/tools/remotecommand"
)
// RawTtyFormatter ...
@@ -18,20 +18,20 @@ type RawTtyFormatter struct {
// getResize returns a TerminalSize command matching stdin's current
// size on success, and nil on errors.
-func getResize() *remotecommand.TerminalSize {
+func getResize() *define.TerminalSize {
winsize, err := term.GetWinsize(os.Stdin.Fd())
if err != nil {
logrus.Warnf("Could not get terminal size %v", err)
return nil
}
- return &remotecommand.TerminalSize{
+ return &define.TerminalSize{
Width: winsize.Width,
Height: winsize.Height,
}
}
// Helper for prepareAttach - set up a goroutine to generate terminal resize events
-func resizeTty(ctx context.Context, resize chan remotecommand.TerminalSize) {
+func resizeTty(ctx context.Context, resize chan define.TerminalSize) {
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, lsignal.SIGWINCH)
go func() {
@@ -78,7 +78,7 @@ func (f *RawTtyFormatter) Format(entry *logrus.Entry) ([]byte, error) {
return bytes, err
}
-func handleTerminalAttach(ctx context.Context, resize chan remotecommand.TerminalSize) (context.CancelFunc, *term.State, error) {
+func handleTerminalAttach(ctx context.Context, resize chan define.TerminalSize) (context.CancelFunc, *term.State, error) {
logrus.Debugf("Handling terminal attach")
subCtx, cancel := context.WithCancel(ctx)
diff --git a/pkg/domain/infra/abi/terminal/terminal_linux.go b/pkg/domain/infra/abi/terminal/terminal_linux.go
index e5036e769..7a0c2907c 100644
--- a/pkg/domain/infra/abi/terminal/terminal_linux.go
+++ b/pkg/domain/infra/abi/terminal/terminal_linux.go
@@ -11,12 +11,11 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh/terminal"
- "k8s.io/client-go/tools/remotecommand"
)
// ExecAttachCtr execs and attaches to a container
func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, execConfig *libpod.ExecConfig, streams *define.AttachStreams) (int, error) {
- resize := make(chan remotecommand.TerminalSize)
+ resize := make(chan define.TerminalSize)
haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd()))
// Check if we are attached to a terminal. If we are, generate resize
@@ -41,7 +40,7 @@ func ExecAttachCtr(ctx context.Context, ctr *libpod.Container, execConfig *libpo
// if you change the signature of this function from os.File to io.Writer, it will trigger a downstream
// error. we may need to just lint disable this one.
func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr, stdin *os.File, detachKeys string, sigProxy bool, startContainer bool, recursive bool) error { //nolint-interfacer
- resize := make(chan remotecommand.TerminalSize)
+ resize := make(chan define.TerminalSize)
haveTerminal := terminal.IsTerminal(int(os.Stdin.Fd()))
diff --git a/pkg/kubeutils/resize.go b/pkg/kubeutils/resize.go
index afc697d98..957e10f45 100644
--- a/pkg/kubeutils/resize.go
+++ b/pkg/kubeutils/resize.go
@@ -17,14 +17,14 @@ limitations under the License.
package kubeutils
import (
+ "github.com/containers/podman/v3/libpod/define"
"k8s.io/apimachinery/pkg/util/runtime"
- "k8s.io/client-go/tools/remotecommand"
)
// HandleResizing spawns a goroutine that processes the resize channel, calling resizeFunc for each
// remotecommand.TerminalSize received from the channel. The resize channel must be closed elsewhere to stop the
// goroutine.
-func HandleResizing(resize <-chan remotecommand.TerminalSize, resizeFunc func(size remotecommand.TerminalSize)) {
+func HandleResizing(resize <-chan define.TerminalSize, resizeFunc func(size define.TerminalSize)) {
if resize == nil {
return
}
diff --git a/test/e2e/inspect_test.go b/test/e2e/inspect_test.go
index d417fc49d..772ebed05 100644
--- a/test/e2e/inspect_test.go
+++ b/test/e2e/inspect_test.go
@@ -490,4 +490,22 @@ var _ = Describe("Podman inspect", func() {
}
Expect(found).To(BeTrue())
})
+
+ It("Dropped capabilities are sorted", func() {
+ ctrName := "testCtr"
+ session := podmanTest.Podman([]string{"run", "-d", "--cap-drop", "CAP_AUDIT_WRITE", "--cap-drop", "CAP_MKNOD", "--cap-drop", "CAP_NET_RAW", "--name", ctrName, ALPINE, "top"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(BeZero())
+
+ inspect := podmanTest.Podman([]string{"inspect", ctrName})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect.ExitCode()).To(BeZero())
+
+ data := inspect.InspectContainerToJSON()
+ Expect(len(data)).To(Equal(1))
+ Expect(len(data[0].HostConfig.CapDrop)).To(Equal(3))
+ Expect(data[0].HostConfig.CapDrop[0]).To(Equal("CAP_AUDIT_WRITE"))
+ Expect(data[0].HostConfig.CapDrop[1]).To(Equal("CAP_MKNOD"))
+ Expect(data[0].HostConfig.CapDrop[2]).To(Equal("CAP_NET_RAW"))
+ })
})
diff --git a/test/e2e/network_test.go b/test/e2e/network_test.go
index a7eb6e629..53521cdc4 100644
--- a/test/e2e/network_test.go
+++ b/test/e2e/network_test.go
@@ -150,6 +150,13 @@ var _ = Describe("Podman network", func() {
defer podmanTest.removeCNINetwork(net)
Expect(session.ExitCode()).To(BeZero())
+ // Tests Default Table Output
+ session = podmanTest.Podman([]string{"network", "ls", "--filter", "id=" + netID})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(BeZero())
+ expectedTable := "NETWORK ID NAME VERSION PLUGINS"
+ Expect(session.OutputToString()).To(ContainSubstring(expectedTable))
+
session = podmanTest.Podman([]string{"network", "ls", "--format", "{{.Name}} {{.ID}}", "--filter", "id=" + netID})
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(BeZero())
diff --git a/test/e2e/run_networking_test.go b/test/e2e/run_networking_test.go
index a6237a49a..0e6e636bc 100644
--- a/test/e2e/run_networking_test.go
+++ b/test/e2e/run_networking_test.go
@@ -766,4 +766,18 @@ var _ = Describe("Podman run networking", func() {
session.WaitWithDefaultTimeout()
Expect(session.ExitCode()).To(BeZero())
})
+
+ It("podman run check dnsname adds dns search domain", func() {
+ Skip("needs dnsname#57")
+ net := "dnsname" + stringid.GenerateNonCryptoID()
+ session := podmanTest.Podman([]string{"network", "create", net})
+ session.WaitWithDefaultTimeout()
+ defer podmanTest.removeCNINetwork(net)
+ Expect(session.ExitCode()).To(BeZero())
+
+ session = podmanTest.Podman([]string{"run", "--network", net, ALPINE, "cat", "/etc/resolv.conf"})
+ session.WaitWithDefaultTimeout()
+ Expect(session.ExitCode()).To(BeZero())
+ Expect(session.OutputToString()).To(ContainSubstring("search dns.podman"))
+ })
})
diff --git a/test/e2e/run_test.go b/test/e2e/run_test.go
index c28086fe8..f0ba9d1d9 100644
--- a/test/e2e/run_test.go
+++ b/test/e2e/run_test.go
@@ -1228,9 +1228,10 @@ USER mail`
for _, line := range lines {
parts := strings.SplitN(line, ":", 3)
if !CGROUPSV2 {
- // ignore unified on cgroup v1
+ // ignore unified on cgroup v1.
// both runc and crun do not set it.
- if parts[1] == "" {
+ // crun does not set named hierarchies.
+ if parts[1] == "" || strings.Contains(parts[1], "name=") {
continue
}
}
diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf
index 18243f296..0114f2975 100644
--- a/vendor/github.com/containers/common/pkg/config/containers.conf
+++ b/vendor/github.com/containers/common/pkg/config/containers.conf
@@ -73,7 +73,6 @@ default_capabilities = [
"SYS_CHROOT"
]
-
# A list of sysctls to be set in containers by default,
# specified as "name=value",
# for example:"net.ipv4.ping_group_range = 0 0".
@@ -241,6 +240,9 @@ default_sysctls = [
#
# cni_plugin_dirs = ["/usr/libexec/cni"]
+# The network name of the default CNI network to attach pods to.
+# default_network = "podman"
+
# Path to the directory where CNI configuration files are located.
#
# network_config_dir = "/etc/cni/net.d/"
@@ -324,7 +326,7 @@ default_sysctls = [
# associated with the pod. This container does nothing other then sleep,
# reserving the pods resources for the lifetime of the pod.
#
-# infra_image = "k8s.gcr.io/pause:3.2"
+# infra_image = "k8s.gcr.io/pause:3.4.1"
# Specify the locking mechanism to use; valid values are "shm" and "file".
# Change the default only if you are sure of what you are doing, in general
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go
index 918ce93e5..57f64c395 100644
--- a/vendor/github.com/containers/common/pkg/config/default.go
+++ b/vendor/github.com/containers/common/pkg/config/default.go
@@ -45,7 +45,7 @@ var (
// DefaultInitPath is the default path to the container-init binary
DefaultInitPath = "/usr/libexec/podman/catatonit"
// DefaultInfraImage to use for infra container
- DefaultInfraImage = "k8s.gcr.io/pause:3.2"
+ DefaultInfraImage = "k8s.gcr.io/pause:3.4.1"
// DefaultRootlessSHMLockPath is the default path for rootless SHM locks
DefaultRootlessSHMLockPath = "/libpod_rootless_lock"
// DefaultDetachKeys is the default keys sequence for detaching a
diff --git a/vendor/github.com/containers/common/pkg/report/template.go b/vendor/github.com/containers/common/pkg/report/template.go
index 559c1625b..f7b4506bb 100644
--- a/vendor/github.com/containers/common/pkg/report/template.go
+++ b/vendor/github.com/containers/common/pkg/report/template.go
@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/json"
"reflect"
+ "regexp"
"strings"
"text/template"
@@ -155,3 +156,18 @@ func (t *Template) Funcs(funcMap FuncMap) *Template {
func (t *Template) IsTable() bool {
return t.isTable
}
+
+var rangeRegex = regexp.MustCompile(`{{\s*range\s*\.\s*}}.*{{\s*end\s*}}`)
+
+// EnforceRange ensures that the format string contains a range
+func EnforceRange(format string) string {
+ if !rangeRegex.MatchString(format) {
+ return "{{range .}}" + format + "{{end}}"
+ }
+ return format
+}
+
+// HasTable returns whether the format is a table
+func HasTable(format string) bool {
+ return strings.HasPrefix(format, "table ")
+}
diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go
index 8efc8b8a2..ff95a6522 100644
--- a/vendor/github.com/containers/common/version/version.go
+++ b/vendor/github.com/containers/common/version/version.go
@@ -1,4 +1,4 @@
package version
// Version is the version of the build.
-const Version = "0.34.3-dev"
+const Version = "0.35.0"
diff --git a/vendor/github.com/docker/spdystream/CONTRIBUTING.md b/vendor/github.com/docker/spdystream/CONTRIBUTING.md
deleted file mode 100644
index d4eddcc53..000000000
--- a/vendor/github.com/docker/spdystream/CONTRIBUTING.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Contributing to SpdyStream
-
-Want to hack on spdystream? Awesome! Here are instructions to get you
-started.
-
-SpdyStream is a part of the [Docker](https://docker.io) project, and follows
-the same rules and principles. If you're already familiar with the way
-Docker does things, you'll feel right at home.
-
-Otherwise, go read
-[Docker's contributions guidelines](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md).
-
-Happy hacking!
diff --git a/vendor/github.com/docker/spdystream/LICENSE b/vendor/github.com/docker/spdystream/LICENSE
deleted file mode 100644
index 9e4bd4dbe..000000000
--- a/vendor/github.com/docker/spdystream/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- Copyright 2014-2015 Docker, Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/docker/spdystream/LICENSE.docs b/vendor/github.com/docker/spdystream/LICENSE.docs
deleted file mode 100644
index e26cd4fc8..000000000
--- a/vendor/github.com/docker/spdystream/LICENSE.docs
+++ /dev/null
@@ -1,425 +0,0 @@
-Attribution-ShareAlike 4.0 International
-
-=======================================================================
-
-Creative Commons Corporation ("Creative Commons") is not a law firm and
-does not provide legal services or legal advice. Distribution of
-Creative Commons public licenses does not create a lawyer-client or
-other relationship. Creative Commons makes its licenses and related
-information available on an "as-is" basis. Creative Commons gives no
-warranties regarding its licenses, any material licensed under their
-terms and conditions, or any related information. Creative Commons
-disclaims all liability for damages resulting from their use to the
-fullest extent possible.
-
-Using Creative Commons Public Licenses
-
-Creative Commons public licenses provide a standard set of terms and
-conditions that creators and other rights holders may use to share
-original works of authorship and other material subject to copyright
-and certain other rights specified in the public license below. The
-following considerations are for informational purposes only, are not
-exhaustive, and do not form part of our licenses.
-
- Considerations for licensors: Our public licenses are
- intended for use by those authorized to give the public
- permission to use material in ways otherwise restricted by
- copyright and certain other rights. Our licenses are
- irrevocable. Licensors should read and understand the terms
- and conditions of the license they choose before applying it.
- Licensors should also secure all rights necessary before
- applying our licenses so that the public can reuse the
- material as expected. Licensors should clearly mark any
- material not subject to the license. This includes other CC-
- licensed material, or material used under an exception or
- limitation to copyright. More considerations for licensors:
- wiki.creativecommons.org/Considerations_for_licensors
-
- Considerations for the public: By using one of our public
- licenses, a licensor grants the public permission to use the
- licensed material under specified terms and conditions. If
- the licensor's permission is not necessary for any reason--for
- example, because of any applicable exception or limitation to
- copyright--then that use is not regulated by the license. Our
- licenses grant only permissions under copyright and certain
- other rights that a licensor has authority to grant. Use of
- the licensed material may still be restricted for other
- reasons, including because others have copyright or other
- rights in the material. A licensor may make special requests,
- such as asking that all changes be marked or described.
- Although not required by our licenses, you are encouraged to
- respect those requests where reasonable. More_considerations
- for the public:
- wiki.creativecommons.org/Considerations_for_licensees
-
-=======================================================================
-
-Creative Commons Attribution-ShareAlike 4.0 International Public
-License
-
-By exercising the Licensed Rights (defined below), You accept and agree
-to be bound by the terms and conditions of this Creative Commons
-Attribution-ShareAlike 4.0 International Public License ("Public
-License"). To the extent this Public License may be interpreted as a
-contract, You are granted the Licensed Rights in consideration of Your
-acceptance of these terms and conditions, and the Licensor grants You
-such rights in consideration of benefits the Licensor receives from
-making the Licensed Material available under these terms and
-conditions.
-
-
-Section 1 -- Definitions.
-
- a. Adapted Material means material subject to Copyright and Similar
- Rights that is derived from or based upon the Licensed Material
- and in which the Licensed Material is translated, altered,
- arranged, transformed, or otherwise modified in a manner requiring
- permission under the Copyright and Similar Rights held by the
- Licensor. For purposes of this Public License, where the Licensed
- Material is a musical work, performance, or sound recording,
- Adapted Material is always produced where the Licensed Material is
- synched in timed relation with a moving image.
-
- b. Adapter's License means the license You apply to Your Copyright
- and Similar Rights in Your contributions to Adapted Material in
- accordance with the terms and conditions of this Public License.
-
- c. BY-SA Compatible License means a license listed at
- creativecommons.org/compatiblelicenses, approved by Creative
- Commons as essentially the equivalent of this Public License.
-
- d. Copyright and Similar Rights means copyright and/or similar rights
- closely related to copyright including, without limitation,
- performance, broadcast, sound recording, and Sui Generis Database
- Rights, without regard to how the rights are labeled or
- categorized. For purposes of this Public License, the rights
- specified in Section 2(b)(1)-(2) are not Copyright and Similar
- Rights.
-
- e. Effective Technological Measures means those measures that, in the
- absence of proper authority, may not be circumvented under laws
- fulfilling obligations under Article 11 of the WIPO Copyright
- Treaty adopted on December 20, 1996, and/or similar international
- agreements.
-
- f. Exceptions and Limitations means fair use, fair dealing, and/or
- any other exception or limitation to Copyright and Similar Rights
- that applies to Your use of the Licensed Material.
-
- g. License Elements means the license attributes listed in the name
- of a Creative Commons Public License. The License Elements of this
- Public License are Attribution and ShareAlike.
-
- h. Licensed Material means the artistic or literary work, database,
- or other material to which the Licensor applied this Public
- License.
-
- i. Licensed Rights means the rights granted to You subject to the
- terms and conditions of this Public License, which are limited to
- all Copyright and Similar Rights that apply to Your use of the
- Licensed Material and that the Licensor has authority to license.
-
- j. Licensor means the individual(s) or entity(ies) granting rights
- under this Public License.
-
- k. Share means to provide material to the public by any means or
- process that requires permission under the Licensed Rights, such
- as reproduction, public display, public performance, distribution,
- dissemination, communication, or importation, and to make material
- available to the public including in ways that members of the
- public may access the material from a place and at a time
- individually chosen by them.
-
- l. Sui Generis Database Rights means rights other than copyright
- resulting from Directive 96/9/EC of the European Parliament and of
- the Council of 11 March 1996 on the legal protection of databases,
- as amended and/or succeeded, as well as other essentially
- equivalent rights anywhere in the world.
-
- m. You means the individual or entity exercising the Licensed Rights
- under this Public License. Your has a corresponding meaning.
-
-
-Section 2 -- Scope.
-
- a. License grant.
-
- 1. Subject to the terms and conditions of this Public License,
- the Licensor hereby grants You a worldwide, royalty-free,
- non-sublicensable, non-exclusive, irrevocable license to
- exercise the Licensed Rights in the Licensed Material to:
-
- a. reproduce and Share the Licensed Material, in whole or
- in part; and
-
- b. produce, reproduce, and Share Adapted Material.
-
- 2. Exceptions and Limitations. For the avoidance of doubt, where
- Exceptions and Limitations apply to Your use, this Public
- License does not apply, and You do not need to comply with
- its terms and conditions.
-
- 3. Term. The term of this Public License is specified in Section
- 6(a).
-
- 4. Media and formats; technical modifications allowed. The
- Licensor authorizes You to exercise the Licensed Rights in
- all media and formats whether now known or hereafter created,
- and to make technical modifications necessary to do so. The
- Licensor waives and/or agrees not to assert any right or
- authority to forbid You from making technical modifications
- necessary to exercise the Licensed Rights, including
- technical modifications necessary to circumvent Effective
- Technological Measures. For purposes of this Public License,
- simply making modifications authorized by this Section 2(a)
- (4) never produces Adapted Material.
-
- 5. Downstream recipients.
-
- a. Offer from the Licensor -- Licensed Material. Every
- recipient of the Licensed Material automatically
- receives an offer from the Licensor to exercise the
- Licensed Rights under the terms and conditions of this
- Public License.
-
- b. Additional offer from the Licensor -- Adapted Material.
- Every recipient of Adapted Material from You
- automatically receives an offer from the Licensor to
- exercise the Licensed Rights in the Adapted Material
- under the conditions of the Adapter's License You apply.
-
- c. No downstream restrictions. You may not offer or impose
- any additional or different terms or conditions on, or
- apply any Effective Technological Measures to, the
- Licensed Material if doing so restricts exercise of the
- Licensed Rights by any recipient of the Licensed
- Material.
-
- 6. No endorsement. Nothing in this Public License constitutes or
- may be construed as permission to assert or imply that You
- are, or that Your use of the Licensed Material is, connected
- with, or sponsored, endorsed, or granted official status by,
- the Licensor or others designated to receive attribution as
- provided in Section 3(a)(1)(A)(i).
-
- b. Other rights.
-
- 1. Moral rights, such as the right of integrity, are not
- licensed under this Public License, nor are publicity,
- privacy, and/or other similar personality rights; however, to
- the extent possible, the Licensor waives and/or agrees not to
- assert any such rights held by the Licensor to the limited
- extent necessary to allow You to exercise the Licensed
- Rights, but not otherwise.
-
- 2. Patent and trademark rights are not licensed under this
- Public License.
-
- 3. To the extent possible, the Licensor waives any right to
- collect royalties from You for the exercise of the Licensed
- Rights, whether directly or through a collecting society
- under any voluntary or waivable statutory or compulsory
- licensing scheme. In all other cases the Licensor expressly
- reserves any right to collect such royalties.
-
-
-Section 3 -- License Conditions.
-
-Your exercise of the Licensed Rights is expressly made subject to the
-following conditions.
-
- a. Attribution.
-
- 1. If You Share the Licensed Material (including in modified
- form), You must:
-
- a. retain the following if it is supplied by the Licensor
- with the Licensed Material:
-
- i. identification of the creator(s) of the Licensed
- Material and any others designated to receive
- attribution, in any reasonable manner requested by
- the Licensor (including by pseudonym if
- designated);
-
- ii. a copyright notice;
-
- iii. a notice that refers to this Public License;
-
- iv. a notice that refers to the disclaimer of
- warranties;
-
- v. a URI or hyperlink to the Licensed Material to the
- extent reasonably practicable;
-
- b. indicate if You modified the Licensed Material and
- retain an indication of any previous modifications; and
-
- c. indicate the Licensed Material is licensed under this
- Public License, and include the text of, or the URI or
- hyperlink to, this Public License.
-
- 2. You may satisfy the conditions in Section 3(a)(1) in any
- reasonable manner based on the medium, means, and context in
- which You Share the Licensed Material. For example, it may be
- reasonable to satisfy the conditions by providing a URI or
- hyperlink to a resource that includes the required
- information.
-
- 3. If requested by the Licensor, You must remove any of the
- information required by Section 3(a)(1)(A) to the extent
- reasonably practicable.
-
- b. ShareAlike.
-
- In addition to the conditions in Section 3(a), if You Share
- Adapted Material You produce, the following conditions also apply.
-
- 1. The Adapter's License You apply must be a Creative Commons
- license with the same License Elements, this version or
- later, or a BY-SA Compatible License.
-
- 2. You must include the text of, or the URI or hyperlink to, the
- Adapter's License You apply. You may satisfy this condition
- in any reasonable manner based on the medium, means, and
- context in which You Share Adapted Material.
-
- 3. You may not offer or impose any additional or different terms
- or conditions on, or apply any Effective Technological
- Measures to, Adapted Material that restrict exercise of the
- rights granted under the Adapter's License You apply.
-
-
-Section 4 -- Sui Generis Database Rights.
-
-Where the Licensed Rights include Sui Generis Database Rights that
-apply to Your use of the Licensed Material:
-
- a. for the avoidance of doubt, Section 2(a)(1) grants You the right
- to extract, reuse, reproduce, and Share all or a substantial
- portion of the contents of the database;
-
- b. if You include all or a substantial portion of the database
- contents in a database in which You have Sui Generis Database
- Rights, then the database in which You have Sui Generis Database
- Rights (but not its individual contents) is Adapted Material,
-
- including for purposes of Section 3(b); and
- c. You must comply with the conditions in Section 3(a) if You Share
- all or a substantial portion of the contents of the database.
-
-For the avoidance of doubt, this Section 4 supplements and does not
-replace Your obligations under this Public License where the Licensed
-Rights include other Copyright and Similar Rights.
-
-
-Section 5 -- Disclaimer of Warranties and Limitation of Liability.
-
- a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
- EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
- AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
- ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
- IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
- WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
- PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
- ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
- KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
- ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
-
- b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
- TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
- NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
- INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
- COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
- USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
- ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
- DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
- IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
-
- c. The disclaimer of warranties and limitation of liability provided
- above shall be interpreted in a manner that, to the extent
- possible, most closely approximates an absolute disclaimer and
- waiver of all liability.
-
-
-Section 6 -- Term and Termination.
-
- a. This Public License applies for the term of the Copyright and
- Similar Rights licensed here. However, if You fail to comply with
- this Public License, then Your rights under this Public License
- terminate automatically.
-
- b. Where Your right to use the Licensed Material has terminated under
- Section 6(a), it reinstates:
-
- 1. automatically as of the date the violation is cured, provided
- it is cured within 30 days of Your discovery of the
- violation; or
-
- 2. upon express reinstatement by the Licensor.
-
- For the avoidance of doubt, this Section 6(b) does not affect any
- right the Licensor may have to seek remedies for Your violations
- of this Public License.
-
- c. For the avoidance of doubt, the Licensor may also offer the
- Licensed Material under separate terms or conditions or stop
- distributing the Licensed Material at any time; however, doing so
- will not terminate this Public License.
-
- d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
- License.
-
-
-Section 7 -- Other Terms and Conditions.
-
- a. The Licensor shall not be bound by any additional or different
- terms or conditions communicated by You unless expressly agreed.
-
- b. Any arrangements, understandings, or agreements regarding the
- Licensed Material not stated herein are separate from and
- independent of the terms and conditions of this Public License.
-
-
-Section 8 -- Interpretation.
-
- a. For the avoidance of doubt, this Public License does not, and
- shall not be interpreted to, reduce, limit, restrict, or impose
- conditions on any use of the Licensed Material that could lawfully
- be made without permission under this Public License.
-
- b. To the extent possible, if any provision of this Public License is
- deemed unenforceable, it shall be automatically reformed to the
- minimum extent necessary to make it enforceable. If the provision
- cannot be reformed, it shall be severed from this Public License
- without affecting the enforceability of the remaining terms and
- conditions.
-
- c. No term or condition of this Public License will be waived and no
- failure to comply consented to unless expressly agreed to by the
- Licensor.
-
- d. Nothing in this Public License constitutes or may be interpreted
- as a limitation upon, or waiver of, any privileges and immunities
- that apply to the Licensor or You, including from the legal
- processes of any jurisdiction or authority.
-
-
-=======================================================================
-
-Creative Commons is not a party to its public licenses.
-Notwithstanding, Creative Commons may elect to apply one of its public
-licenses to material it publishes and in those instances will be
-considered the "Licensor." Except for the limited purpose of indicating
-that material is shared under a Creative Commons public license or as
-otherwise permitted by the Creative Commons policies published at
-creativecommons.org/policies, Creative Commons does not authorize the
-use of the trademark "Creative Commons" or any other trademark or logo
-of Creative Commons without its prior written consent including,
-without limitation, in connection with any unauthorized modifications
-to any of its public licenses or any other arrangements,
-understandings, or agreements concerning use of licensed material. For
-the avoidance of doubt, this paragraph does not form part of the public
-licenses.
-
-Creative Commons may be contacted at creativecommons.org.
diff --git a/vendor/github.com/docker/spdystream/MAINTAINERS b/vendor/github.com/docker/spdystream/MAINTAINERS
deleted file mode 100644
index 14e263325..000000000
--- a/vendor/github.com/docker/spdystream/MAINTAINERS
+++ /dev/null
@@ -1,28 +0,0 @@
-# Spdystream maintainers file
-#
-# This file describes who runs the docker/spdystream project and how.
-# This is a living document - if you see something out of date or missing, speak up!
-#
-# It is structured to be consumable by both humans and programs.
-# To extract its contents programmatically, use any TOML-compliant parser.
-#
-# This file is compiled into the MAINTAINERS file in docker/opensource.
-#
-[Org]
- [Org."Core maintainers"]
- people = [
- "dmcgowan",
- ]
-
-[people]
-
-# A reference list of all people associated with the project.
-# All other sections should refer to people by their canonical key
-# in the people section.
-
- # ADD YOURSELF HERE IN ALPHABETICAL ORDER
-
- [people.dmcgowan]
- Name = "Derek McGowan"
- Email = "derek@docker.com"
- GitHub = "dmcgowan"
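
The MAINTAINERS file above notes that it is TOML and can be consumed programmatically. A minimal sketch of that claim, assuming the third-party github.com/BurntSushi/toml parser and a trimmed inline copy of the data (neither is part of this repository):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// maintainers mirrors the [Org] and [people] tables of the MAINTAINERS file.
type maintainers struct {
	Org    map[string]orgGroup `toml:"Org"`
	People map[string]person   `toml:"people"`
}

type orgGroup struct {
	People []string `toml:"people"`
}

type person struct {
	Name   string
	Email  string
	GitHub string
}

func main() {
	// A trimmed copy of the deleted file's contents.
	const data = `
[Org]
	[Org."Core maintainers"]
	people = [ "dmcgowan" ]

[people]
	[people.dmcgowan]
	Name = "Derek McGowan"
	Email = "derek@docker.com"
	GitHub = "dmcgowan"
`
	var m maintainers
	if _, err := toml.Decode(data, &m); err != nil {
		panic(err)
	}
	// Resolve the core-maintainer keys to their [people] entries.
	for _, key := range m.Org["Core maintainers"].People {
		fmt.Printf("%s <%s>\n", m.People[key].Name, m.People[key].Email)
	}
}
```
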
diff --git a/vendor/github.com/docker/spdystream/README.md b/vendor/github.com/docker/spdystream/README.md
deleted file mode 100644
index 11cccd0a0..000000000
--- a/vendor/github.com/docker/spdystream/README.md
+++ /dev/null
@@ -1,77 +0,0 @@
-# SpdyStream
-
-A multiplexed stream library using spdy
-
-## Usage
-
-Client example (connecting to mirroring server without auth)
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/docker/spdystream"
- "net"
- "net/http"
-)
-
-func main() {
- conn, err := net.Dial("tcp", "localhost:8080")
- if err != nil {
- panic(err)
- }
- spdyConn, err := spdystream.NewConnection(conn, false)
- if err != nil {
- panic(err)
- }
- go spdyConn.Serve(spdystream.NoOpStreamHandler)
- stream, err := spdyConn.CreateStream(http.Header{}, nil, false)
- if err != nil {
- panic(err)
- }
-
- stream.Wait()
-
- fmt.Fprint(stream, "Writing to stream")
-
- buf := make([]byte, 25)
- stream.Read(buf)
- fmt.Println(string(buf))
-
- stream.Close()
-}
-```
-
-Server example (mirroring server without auth)
-
-```go
-package main
-
-import (
- "github.com/docker/spdystream"
- "net"
-)
-
-func main() {
- listener, err := net.Listen("tcp", "localhost:8080")
- if err != nil {
- panic(err)
- }
- for {
- conn, err := listener.Accept()
- if err != nil {
- panic(err)
- }
- spdyConn, err := spdystream.NewConnection(conn, true)
- if err != nil {
- panic(err)
- }
- go spdyConn.Serve(spdystream.MirrorStreamHandler)
- }
-}
-```
-
-## Copyright and license
-
-Copyright © 2014-2015 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.
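
The removed README only demonstrates stream creation; the connection.go file deleted below also exposes idle timeouts, pings, and graceful close. A small client sketch built solely on that API, assuming the same placeholder localhost:8080 endpoint as the README (the one-minute idle timeout is illustrative):

```go
package main

import (
	"fmt"
	"net"
	"time"

	"github.com/docker/spdystream"
)

func main() {
	conn, err := net.Dial("tcp", "localhost:8080")
	if err != nil {
		panic(err)
	}
	spdyConn, err := spdystream.NewConnection(conn, false)
	if err != nil {
		panic(err)
	}
	go spdyConn.Serve(spdystream.NoOpStreamHandler)

	// Tear the connection down if it sits idle for a minute.
	spdyConn.SetIdleTimeout(time.Minute)

	// Measure round-trip time to the peer.
	rtt, err := spdyConn.Ping()
	if err != nil {
		panic(err)
	}
	fmt.Println("ping round trip:", rtt)

	// Send GOAWAY and wait for outstanding streams to drain.
	if err := spdyConn.CloseWait(); err != nil {
		fmt.Println("close error:", err)
	}
}
```
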
diff --git a/vendor/github.com/docker/spdystream/connection.go b/vendor/github.com/docker/spdystream/connection.go
deleted file mode 100644
index 6031a0db1..000000000
--- a/vendor/github.com/docker/spdystream/connection.go
+++ /dev/null
@@ -1,958 +0,0 @@
-package spdystream
-
-import (
- "errors"
- "fmt"
- "io"
- "net"
- "net/http"
- "sync"
- "time"
-
- "github.com/docker/spdystream/spdy"
-)
-
-var (
- ErrInvalidStreamId = errors.New("Invalid stream id")
-	ErrTimeout           = errors.New("Timeout occurred")
- ErrReset = errors.New("Stream reset")
- ErrWriteClosedStream = errors.New("Write on closed stream")
-)
-
-const (
- FRAME_WORKERS = 5
- QUEUE_SIZE = 50
-)
-
-type StreamHandler func(stream *Stream)
-
-type AuthHandler func(header http.Header, slot uint8, parent uint32) bool
-
-type idleAwareFramer struct {
- f *spdy.Framer
- conn *Connection
- writeLock sync.Mutex
- resetChan chan struct{}
- setTimeoutLock sync.Mutex
- setTimeoutChan chan time.Duration
- timeout time.Duration
-}
-
-func newIdleAwareFramer(framer *spdy.Framer) *idleAwareFramer {
- iaf := &idleAwareFramer{
- f: framer,
- resetChan: make(chan struct{}, 2),
- // setTimeoutChan needs to be buffered to avoid deadlocks when calling setIdleTimeout at about
- // the same time the connection is being closed
- setTimeoutChan: make(chan time.Duration, 1),
- }
- return iaf
-}
-
-func (i *idleAwareFramer) monitor() {
- var (
- timer *time.Timer
- expired <-chan time.Time
- resetChan = i.resetChan
- setTimeoutChan = i.setTimeoutChan
- )
-Loop:
- for {
- select {
- case timeout := <-i.setTimeoutChan:
- i.timeout = timeout
- if timeout == 0 {
- if timer != nil {
- timer.Stop()
- }
- } else {
- if timer == nil {
- timer = time.NewTimer(timeout)
- expired = timer.C
- } else {
- timer.Reset(timeout)
- }
- }
- case <-resetChan:
- if timer != nil && i.timeout > 0 {
- timer.Reset(i.timeout)
- }
- case <-expired:
- i.conn.streamCond.L.Lock()
- streams := i.conn.streams
- i.conn.streams = make(map[spdy.StreamId]*Stream)
- i.conn.streamCond.Broadcast()
- i.conn.streamCond.L.Unlock()
- go func() {
- for _, stream := range streams {
- stream.resetStream()
- }
- i.conn.Close()
- }()
- case <-i.conn.closeChan:
- if timer != nil {
- timer.Stop()
- }
-
- // Start a goroutine to drain resetChan. This is needed because we've seen
- // some unit tests with large numbers of goroutines get into a situation
- // where resetChan fills up, at least 1 call to Write() is still trying to
- // send to resetChan, the connection gets closed, and this case statement
- // attempts to grab the write lock that Write() already has, causing a
- // deadlock.
- //
- // See https://github.com/docker/spdystream/issues/49 for more details.
- go func() {
- for _ = range resetChan {
- }
- }()
-
- go func() {
- for _ = range setTimeoutChan {
- }
- }()
-
- i.writeLock.Lock()
- close(resetChan)
- i.resetChan = nil
- i.writeLock.Unlock()
-
- i.setTimeoutLock.Lock()
- close(i.setTimeoutChan)
- i.setTimeoutChan = nil
- i.setTimeoutLock.Unlock()
-
- break Loop
- }
- }
-
- // Drain resetChan
- for _ = range resetChan {
- }
-}
-
-func (i *idleAwareFramer) WriteFrame(frame spdy.Frame) error {
- i.writeLock.Lock()
- defer i.writeLock.Unlock()
- if i.resetChan == nil {
- return io.EOF
- }
- err := i.f.WriteFrame(frame)
- if err != nil {
- return err
- }
-
- i.resetChan <- struct{}{}
-
- return nil
-}
-
-func (i *idleAwareFramer) ReadFrame() (spdy.Frame, error) {
- frame, err := i.f.ReadFrame()
- if err != nil {
- return nil, err
- }
-
- // resetChan should never be closed since it is only closed
- // when the connection has closed its closeChan. This closure
- // only occurs after all Reads have finished
- // TODO (dmcgowan): refactor relationship into connection
- i.resetChan <- struct{}{}
-
- return frame, nil
-}
-
-func (i *idleAwareFramer) setIdleTimeout(timeout time.Duration) {
- i.setTimeoutLock.Lock()
- defer i.setTimeoutLock.Unlock()
-
- if i.setTimeoutChan == nil {
- return
- }
-
- i.setTimeoutChan <- timeout
-}
-
-type Connection struct {
- conn net.Conn
- framer *idleAwareFramer
-
- closeChan chan bool
- goneAway bool
- lastStreamChan chan<- *Stream
- goAwayTimeout time.Duration
- closeTimeout time.Duration
-
- streamLock *sync.RWMutex
- streamCond *sync.Cond
- streams map[spdy.StreamId]*Stream
-
- nextIdLock sync.Mutex
- receiveIdLock sync.Mutex
- nextStreamId spdy.StreamId
- receivedStreamId spdy.StreamId
-
- pingIdLock sync.Mutex
- pingId uint32
- pingChans map[uint32]chan error
-
- shutdownLock sync.Mutex
- shutdownChan chan error
- hasShutdown bool
-
- // for testing https://github.com/docker/spdystream/pull/56
- dataFrameHandler func(*spdy.DataFrame) error
-}
-
-// NewConnection creates a new spdy connection from an existing
-// network connection.
-func NewConnection(conn net.Conn, server bool) (*Connection, error) {
- framer, framerErr := spdy.NewFramer(conn, conn)
- if framerErr != nil {
- return nil, framerErr
- }
- idleAwareFramer := newIdleAwareFramer(framer)
- var sid spdy.StreamId
- var rid spdy.StreamId
- var pid uint32
- if server {
- sid = 2
- rid = 1
- pid = 2
- } else {
- sid = 1
- rid = 2
- pid = 1
- }
-
- streamLock := new(sync.RWMutex)
- streamCond := sync.NewCond(streamLock)
-
- session := &Connection{
- conn: conn,
- framer: idleAwareFramer,
-
- closeChan: make(chan bool),
- goAwayTimeout: time.Duration(0),
- closeTimeout: time.Duration(0),
-
- streamLock: streamLock,
- streamCond: streamCond,
- streams: make(map[spdy.StreamId]*Stream),
- nextStreamId: sid,
- receivedStreamId: rid,
-
- pingId: pid,
- pingChans: make(map[uint32]chan error),
-
- shutdownChan: make(chan error),
- }
- session.dataFrameHandler = session.handleDataFrame
- idleAwareFramer.conn = session
- go idleAwareFramer.monitor()
-
- return session, nil
-}
-
-// Ping sends a ping frame across the connection and
-// returns the response time
-func (s *Connection) Ping() (time.Duration, error) {
- pid := s.pingId
- s.pingIdLock.Lock()
- if s.pingId > 0x7ffffffe {
- s.pingId = s.pingId - 0x7ffffffe
- } else {
- s.pingId = s.pingId + 2
- }
- s.pingIdLock.Unlock()
- pingChan := make(chan error)
- s.pingChans[pid] = pingChan
- defer delete(s.pingChans, pid)
-
- frame := &spdy.PingFrame{Id: pid}
- startTime := time.Now()
- writeErr := s.framer.WriteFrame(frame)
- if writeErr != nil {
- return time.Duration(0), writeErr
- }
- select {
- case <-s.closeChan:
- return time.Duration(0), errors.New("connection closed")
- case err, ok := <-pingChan:
- if ok && err != nil {
- return time.Duration(0), err
- }
- break
- }
- return time.Now().Sub(startTime), nil
-}
-
-// Serve handles frames sent from the server, including reply frames
-// which are needed to fully initiate connections. Both clients and servers
-// should call Serve in a separate goroutine before creating streams.
-func (s *Connection) Serve(newHandler StreamHandler) {
- // use a WaitGroup to wait for all frames to be drained after receiving
- // go-away.
- var wg sync.WaitGroup
-
-	// Partition queues to ensure stream frames are handled
- // by the same worker, ensuring order is maintained
- frameQueues := make([]*PriorityFrameQueue, FRAME_WORKERS)
- for i := 0; i < FRAME_WORKERS; i++ {
- frameQueues[i] = NewPriorityFrameQueue(QUEUE_SIZE)
-
- // Ensure frame queue is drained when connection is closed
- go func(frameQueue *PriorityFrameQueue) {
- <-s.closeChan
- frameQueue.Drain()
- }(frameQueues[i])
-
- wg.Add(1)
- go func(frameQueue *PriorityFrameQueue) {
- // let the WaitGroup know this worker is done
- defer wg.Done()
-
- s.frameHandler(frameQueue, newHandler)
- }(frameQueues[i])
- }
-
- var (
- partitionRoundRobin int
- goAwayFrame *spdy.GoAwayFrame
- )
-Loop:
- for {
- readFrame, err := s.framer.ReadFrame()
- if err != nil {
- if err != io.EOF {
- fmt.Errorf("frame read error: %s", err)
- } else {
- debugMessage("(%p) EOF received", s)
- }
- break
- }
- var priority uint8
- var partition int
- switch frame := readFrame.(type) {
- case *spdy.SynStreamFrame:
- if s.checkStreamFrame(frame) {
- priority = frame.Priority
- partition = int(frame.StreamId % FRAME_WORKERS)
- debugMessage("(%p) Add stream frame: %d ", s, frame.StreamId)
- s.addStreamFrame(frame)
- } else {
- debugMessage("(%p) Rejected stream frame: %d ", s, frame.StreamId)
- continue
- }
- case *spdy.SynReplyFrame:
- priority = s.getStreamPriority(frame.StreamId)
- partition = int(frame.StreamId % FRAME_WORKERS)
- case *spdy.DataFrame:
- priority = s.getStreamPriority(frame.StreamId)
- partition = int(frame.StreamId % FRAME_WORKERS)
- case *spdy.RstStreamFrame:
- priority = s.getStreamPriority(frame.StreamId)
- partition = int(frame.StreamId % FRAME_WORKERS)
- case *spdy.HeadersFrame:
- priority = s.getStreamPriority(frame.StreamId)
- partition = int(frame.StreamId % FRAME_WORKERS)
- case *spdy.PingFrame:
- priority = 0
- partition = partitionRoundRobin
- partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
- case *spdy.GoAwayFrame:
- // hold on to the go away frame and exit the loop
- goAwayFrame = frame
- break Loop
- default:
- priority = 7
- partition = partitionRoundRobin
- partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
- }
- frameQueues[partition].Push(readFrame, priority)
- }
- close(s.closeChan)
-
- // wait for all frame handler workers to indicate they've drained their queues
- // before handling the go away frame
- wg.Wait()
-
- if goAwayFrame != nil {
- s.handleGoAwayFrame(goAwayFrame)
- }
-
- // now it's safe to close remote channels and empty s.streams
- s.streamCond.L.Lock()
- // notify streams that they're now closed, which will
- // unblock any stream Read() calls
- for _, stream := range s.streams {
- stream.closeRemoteChannels()
- }
- s.streams = make(map[spdy.StreamId]*Stream)
- s.streamCond.Broadcast()
- s.streamCond.L.Unlock()
-}
-
-func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler StreamHandler) {
- for {
- popFrame := frameQueue.Pop()
- if popFrame == nil {
- return
- }
-
- var frameErr error
- switch frame := popFrame.(type) {
- case *spdy.SynStreamFrame:
- frameErr = s.handleStreamFrame(frame, newHandler)
- case *spdy.SynReplyFrame:
- frameErr = s.handleReplyFrame(frame)
- case *spdy.DataFrame:
- frameErr = s.dataFrameHandler(frame)
- case *spdy.RstStreamFrame:
- frameErr = s.handleResetFrame(frame)
- case *spdy.HeadersFrame:
- frameErr = s.handleHeaderFrame(frame)
- case *spdy.PingFrame:
- frameErr = s.handlePingFrame(frame)
- case *spdy.GoAwayFrame:
- frameErr = s.handleGoAwayFrame(frame)
- default:
- frameErr = fmt.Errorf("unhandled frame type: %T", frame)
- }
-
- if frameErr != nil {
- fmt.Errorf("frame handling error: %s", frameErr)
- }
- }
-}
-
-func (s *Connection) getStreamPriority(streamId spdy.StreamId) uint8 {
- stream, streamOk := s.getStream(streamId)
- if !streamOk {
- return 7
- }
- return stream.priority
-}
-
-func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) {
- var parent *Stream
- if frame.AssociatedToStreamId != spdy.StreamId(0) {
- parent, _ = s.getStream(frame.AssociatedToStreamId)
- }
-
- stream := &Stream{
- streamId: frame.StreamId,
- parent: parent,
- conn: s,
- startChan: make(chan error),
- headers: frame.Headers,
- finished: (frame.CFHeader.Flags & spdy.ControlFlagUnidirectional) != 0x00,
- replyCond: sync.NewCond(new(sync.Mutex)),
- dataChan: make(chan []byte),
- headerChan: make(chan http.Header),
- closeChan: make(chan bool),
- }
- if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 {
- stream.closeRemoteChannels()
- }
-
- s.addStream(stream)
-}
-
-// checkStreamFrame checks to see if a stream frame is allowed.
-// If the stream id is invalid, a reset frame with protocol error
-// is sent back and false is returned.
-func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool {
- s.receiveIdLock.Lock()
- defer s.receiveIdLock.Unlock()
- if s.goneAway {
- return false
- }
- validationErr := s.validateStreamId(frame.StreamId)
- if validationErr != nil {
- go func() {
- resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId)
- if resetErr != nil {
- fmt.Errorf("reset error: %s", resetErr)
- }
- }()
- return false
- }
- return true
-}
-
-func (s *Connection) handleStreamFrame(frame *spdy.SynStreamFrame, newHandler StreamHandler) error {
- stream, ok := s.getStream(frame.StreamId)
- if !ok {
- return fmt.Errorf("Missing stream: %d", frame.StreamId)
- }
-
- newHandler(stream)
-
- return nil
-}
-
-func (s *Connection) handleReplyFrame(frame *spdy.SynReplyFrame) error {
- debugMessage("(%p) Reply frame received for %d", s, frame.StreamId)
- stream, streamOk := s.getStream(frame.StreamId)
- if !streamOk {
- debugMessage("Reply frame gone away for %d", frame.StreamId)
- // Stream has already gone away
- return nil
- }
- if stream.replied {
- // Stream has already received reply
- return nil
- }
- stream.replied = true
-
- // TODO Check for error
- if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
- s.remoteStreamFinish(stream)
- }
-
- close(stream.startChan)
-
- return nil
-}
-
-func (s *Connection) handleResetFrame(frame *spdy.RstStreamFrame) error {
- stream, streamOk := s.getStream(frame.StreamId)
- if !streamOk {
- // Stream has already been removed
- return nil
- }
- s.removeStream(stream)
- stream.closeRemoteChannels()
-
- if !stream.replied {
- stream.replied = true
- stream.startChan <- ErrReset
- close(stream.startChan)
- }
-
- stream.finishLock.Lock()
- stream.finished = true
- stream.finishLock.Unlock()
-
- return nil
-}
-
-func (s *Connection) handleHeaderFrame(frame *spdy.HeadersFrame) error {
- stream, streamOk := s.getStream(frame.StreamId)
- if !streamOk {
- // Stream has already gone away
- return nil
- }
- if !stream.replied {
- // No reply received...Protocol error?
- return nil
- }
-
- // TODO limit headers while not blocking (use buffered chan or goroutine?)
- select {
- case <-stream.closeChan:
- return nil
- case stream.headerChan <- frame.Headers:
- }
-
- if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
- s.remoteStreamFinish(stream)
- }
-
- return nil
-}
-
-func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error {
- debugMessage("(%p) Data frame received for %d", s, frame.StreamId)
- stream, streamOk := s.getStream(frame.StreamId)
- if !streamOk {
- debugMessage("(%p) Data frame gone away for %d", s, frame.StreamId)
- // Stream has already gone away
- return nil
- }
- if !stream.replied {
- debugMessage("(%p) Data frame not replied %d", s, frame.StreamId)
- // No reply received...Protocol error?
- return nil
- }
-
- debugMessage("(%p) (%d) Data frame handling", stream, stream.streamId)
- if len(frame.Data) > 0 {
- stream.dataLock.RLock()
- select {
- case <-stream.closeChan:
- debugMessage("(%p) (%d) Data frame not sent (stream shut down)", stream, stream.streamId)
- case stream.dataChan <- frame.Data:
- debugMessage("(%p) (%d) Data frame sent", stream, stream.streamId)
- }
- stream.dataLock.RUnlock()
- }
- if (frame.Flags & spdy.DataFlagFin) != 0x00 {
- s.remoteStreamFinish(stream)
- }
- return nil
-}
-
-func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error {
- if s.pingId&0x01 != frame.Id&0x01 {
- return s.framer.WriteFrame(frame)
- }
- pingChan, pingOk := s.pingChans[frame.Id]
- if pingOk {
- close(pingChan)
- }
- return nil
-}
-
-func (s *Connection) handleGoAwayFrame(frame *spdy.GoAwayFrame) error {
- debugMessage("(%p) Go away received", s)
- s.receiveIdLock.Lock()
- if s.goneAway {
- s.receiveIdLock.Unlock()
- return nil
- }
- s.goneAway = true
- s.receiveIdLock.Unlock()
-
- if s.lastStreamChan != nil {
- stream, _ := s.getStream(frame.LastGoodStreamId)
- go func() {
- s.lastStreamChan <- stream
- }()
- }
-
- // Do not block frame handler waiting for closure
- go s.shutdown(s.goAwayTimeout)
-
- return nil
-}
-
-func (s *Connection) remoteStreamFinish(stream *Stream) {
- stream.closeRemoteChannels()
-
- stream.finishLock.Lock()
- if stream.finished {
- // Stream is fully closed, cleanup
- s.removeStream(stream)
- }
- stream.finishLock.Unlock()
-}
-
-// CreateStream creates a new spdy stream using the parameters for
-// creating the stream frame. The stream frame will be sent upon
-// calling this function, however this function does not wait for
-// the reply frame. If waiting for the reply is desired, use
-// the stream Wait or WaitTimeout function on the stream returned
-// by this function.
-func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) {
- // MUST synchronize stream creation (all the way to writing the frame)
- // as stream IDs **MUST** increase monotonically.
- s.nextIdLock.Lock()
- defer s.nextIdLock.Unlock()
-
- streamId := s.getNextStreamId()
- if streamId == 0 {
- return nil, fmt.Errorf("Unable to get new stream id")
- }
-
- stream := &Stream{
- streamId: streamId,
- parent: parent,
- conn: s,
- startChan: make(chan error),
- headers: headers,
- dataChan: make(chan []byte),
- headerChan: make(chan http.Header),
- closeChan: make(chan bool),
- }
-
- debugMessage("(%p) (%p) Create stream", s, stream)
-
- s.addStream(stream)
-
- return stream, s.sendStream(stream, fin)
-}
-
-func (s *Connection) shutdown(closeTimeout time.Duration) {
- // TODO Ensure this isn't called multiple times
- s.shutdownLock.Lock()
- if s.hasShutdown {
- s.shutdownLock.Unlock()
- return
- }
- s.hasShutdown = true
- s.shutdownLock.Unlock()
-
- var timeout <-chan time.Time
- if closeTimeout > time.Duration(0) {
- timeout = time.After(closeTimeout)
- }
- streamsClosed := make(chan bool)
-
- go func() {
- s.streamCond.L.Lock()
- for len(s.streams) > 0 {
- debugMessage("Streams opened: %d, %#v", len(s.streams), s.streams)
- s.streamCond.Wait()
- }
- s.streamCond.L.Unlock()
- close(streamsClosed)
- }()
-
- var err error
- select {
- case <-streamsClosed:
- // No active streams, close should be safe
- err = s.conn.Close()
- case <-timeout:
- // Force ungraceful close
- err = s.conn.Close()
- // Wait for cleanup to clear active streams
- <-streamsClosed
- }
-
- if err != nil {
- duration := 10 * time.Minute
- time.AfterFunc(duration, func() {
- select {
- case err, ok := <-s.shutdownChan:
- if ok {
- fmt.Errorf("Unhandled close error after %s: %s", duration, err)
- }
- default:
- }
- })
- s.shutdownChan <- err
- }
- close(s.shutdownChan)
-
- return
-}
-
-// Closes spdy connection by sending GoAway frame and initiating shutdown
-func (s *Connection) Close() error {
- s.receiveIdLock.Lock()
- if s.goneAway {
- s.receiveIdLock.Unlock()
- return nil
- }
- s.goneAway = true
- s.receiveIdLock.Unlock()
-
- var lastStreamId spdy.StreamId
- if s.receivedStreamId > 2 {
- lastStreamId = s.receivedStreamId - 2
- }
-
- goAwayFrame := &spdy.GoAwayFrame{
- LastGoodStreamId: lastStreamId,
- Status: spdy.GoAwayOK,
- }
-
- err := s.framer.WriteFrame(goAwayFrame)
- if err != nil {
- return err
- }
-
- go s.shutdown(s.closeTimeout)
-
- return nil
-}
-
-// CloseWait closes the connection and waits for shutdown
-// to finish. Note the underlying network Connection
-// is not closed until the end of shutdown.
-func (s *Connection) CloseWait() error {
- closeErr := s.Close()
- if closeErr != nil {
- return closeErr
- }
- shutdownErr, ok := <-s.shutdownChan
- if ok {
- return shutdownErr
- }
- return nil
-}
-
-// Wait waits for the connection to finish shutdown or for
-// the wait timeout duration to expire. This needs to be
-// called either after Close has been called or the GOAWAYFRAME
-// has been received. If the wait timeout is 0, this function
-// will block until shutdown finishes. If wait is never called
-// and a shutdown error occurs, that error will be logged as an
-// unhandled error.
-func (s *Connection) Wait(waitTimeout time.Duration) error {
- var timeout <-chan time.Time
- if waitTimeout > time.Duration(0) {
- timeout = time.After(waitTimeout)
- }
-
- select {
- case err, ok := <-s.shutdownChan:
- if ok {
- return err
- }
- case <-timeout:
- return ErrTimeout
- }
- return nil
-}
-
-// NotifyClose registers a channel to be called when the remote
-// peer inidicates connection closure. The last stream to be
-// received by the remote will be sent on the channel. The notify
-// timeout will determine the duration between go away received
-// and the connection being closed.
-func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) {
- s.goAwayTimeout = timeout
- s.lastStreamChan = c
-}
-
-// SetCloseTimeout sets the amount of time close will wait for
-// streams to finish before terminating the underlying network
-// connection. Setting the timeout to 0 will cause close to
-// wait forever, which is the default.
-func (s *Connection) SetCloseTimeout(timeout time.Duration) {
- s.closeTimeout = timeout
-}
-
-// SetIdleTimeout sets the amount of time the connection may sit idle before
-// it is forcefully terminated.
-func (s *Connection) SetIdleTimeout(timeout time.Duration) {
- s.framer.setIdleTimeout(timeout)
-}
-
-func (s *Connection) sendHeaders(headers http.Header, stream *Stream, fin bool) error {
- var flags spdy.ControlFlags
- if fin {
- flags = spdy.ControlFlagFin
- }
-
- headerFrame := &spdy.HeadersFrame{
- StreamId: stream.streamId,
- Headers: headers,
- CFHeader: spdy.ControlFrameHeader{Flags: flags},
- }
-
- return s.framer.WriteFrame(headerFrame)
-}
-
-func (s *Connection) sendReply(headers http.Header, stream *Stream, fin bool) error {
- var flags spdy.ControlFlags
- if fin {
- flags = spdy.ControlFlagFin
- }
-
- replyFrame := &spdy.SynReplyFrame{
- StreamId: stream.streamId,
- Headers: headers,
- CFHeader: spdy.ControlFrameHeader{Flags: flags},
- }
-
- return s.framer.WriteFrame(replyFrame)
-}
-
-func (s *Connection) sendResetFrame(status spdy.RstStreamStatus, streamId spdy.StreamId) error {
- resetFrame := &spdy.RstStreamFrame{
- StreamId: streamId,
- Status: status,
- }
-
- return s.framer.WriteFrame(resetFrame)
-}
-
-func (s *Connection) sendReset(status spdy.RstStreamStatus, stream *Stream) error {
- return s.sendResetFrame(status, stream.streamId)
-}
-
-func (s *Connection) sendStream(stream *Stream, fin bool) error {
- var flags spdy.ControlFlags
- if fin {
- flags = spdy.ControlFlagFin
- stream.finished = true
- }
-
- var parentId spdy.StreamId
- if stream.parent != nil {
- parentId = stream.parent.streamId
- }
-
- streamFrame := &spdy.SynStreamFrame{
- StreamId: spdy.StreamId(stream.streamId),
- AssociatedToStreamId: spdy.StreamId(parentId),
- Headers: stream.headers,
- CFHeader: spdy.ControlFrameHeader{Flags: flags},
- }
-
- return s.framer.WriteFrame(streamFrame)
-}
-
-// getNextStreamId returns the next sequential id.
-// Every call should produce a unique value or an error.
-func (s *Connection) getNextStreamId() spdy.StreamId {
- sid := s.nextStreamId
- if sid > 0x7fffffff {
- return 0
- }
- s.nextStreamId = s.nextStreamId + 2
- return sid
-}
-
-// PeekNextStreamId returns the next sequential id and keeps the next id untouched
-func (s *Connection) PeekNextStreamId() spdy.StreamId {
- sid := s.nextStreamId
- return sid
-}
-
-func (s *Connection) validateStreamId(rid spdy.StreamId) error {
- if rid > 0x7fffffff || rid < s.receivedStreamId {
- return ErrInvalidStreamId
- }
- s.receivedStreamId = rid + 2
- return nil
-}
-
-func (s *Connection) addStream(stream *Stream) {
- s.streamCond.L.Lock()
- s.streams[stream.streamId] = stream
- debugMessage("(%p) (%p) Stream added, broadcasting: %d", s, stream, stream.streamId)
- s.streamCond.Broadcast()
- s.streamCond.L.Unlock()
-}
-
-func (s *Connection) removeStream(stream *Stream) {
- s.streamCond.L.Lock()
- delete(s.streams, stream.streamId)
- debugMessage("(%p) (%p) Stream removed, broadcasting: %d", s, stream, stream.streamId)
- s.streamCond.Broadcast()
- s.streamCond.L.Unlock()
-}
-
-func (s *Connection) getStream(streamId spdy.StreamId) (stream *Stream, ok bool) {
- s.streamLock.RLock()
- stream, ok = s.streams[streamId]
- s.streamLock.RUnlock()
- return
-}
-
-// FindStream looks up the given stream id and either waits for the
-// stream to be found or returns nil if the stream id is no longer
-// valid.
-func (s *Connection) FindStream(streamId uint32) *Stream {
- var stream *Stream
- var ok bool
- s.streamCond.L.Lock()
- stream, ok = s.streams[spdy.StreamId(streamId)]
- debugMessage("(%p) Found stream %d? %t", s, spdy.StreamId(streamId), ok)
- for !ok && streamId >= uint32(s.receivedStreamId) {
- s.streamCond.Wait()
- stream, ok = s.streams[spdy.StreamId(streamId)]
- }
- s.streamCond.L.Unlock()
- return stream
-}
-
-func (s *Connection) CloseChan() <-chan bool {
- return s.closeChan
-}
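
The doc comments above describe NotifyClose, Wait, and the GOAWAY sequence, but no example ties them together. A hedged server-side sketch using only identifiers defined in the removed file; the listen address and the 30-second drain window are illustrative:

```go
package main

import (
	"fmt"
	"net"
	"time"

	"github.com/docker/spdystream"
)

func main() {
	listener, err := net.Listen("tcp", "localhost:8080")
	if err != nil {
		panic(err)
	}
	conn, err := listener.Accept()
	if err != nil {
		panic(err)
	}
	spdyConn, err := spdystream.NewConnection(conn, true)
	if err != nil {
		panic(err)
	}

	// Be told when the peer sends GOAWAY; the last stream the peer
	// accepted (possibly nil) is delivered on the channel, and the
	// connection is closed after the 30-second drain window.
	lastStream := make(chan *spdystream.Stream, 1)
	spdyConn.NotifyClose(lastStream, 30*time.Second)

	go spdyConn.Serve(spdystream.MirrorStreamHandler)

	// Blocks until the peer goes away.
	if s := <-lastStream; s != nil {
		fmt.Println("peer went away; last accepted stream:", s)
	}
	// Wait with a zero timeout blocks until shutdown finishes.
	spdyConn.Wait(0)
}
```
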
diff --git a/vendor/github.com/docker/spdystream/handlers.go b/vendor/github.com/docker/spdystream/handlers.go
deleted file mode 100644
index b59fa5fdc..000000000
--- a/vendor/github.com/docker/spdystream/handlers.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package spdystream
-
-import (
- "io"
- "net/http"
-)
-
-// MirrorStreamHandler mirrors all streams.
-func MirrorStreamHandler(stream *Stream) {
- replyErr := stream.SendReply(http.Header{}, false)
- if replyErr != nil {
- return
- }
-
- go func() {
- io.Copy(stream, stream)
- stream.Close()
- }()
- go func() {
- for {
- header, receiveErr := stream.ReceiveHeader()
- if receiveErr != nil {
- return
- }
- sendErr := stream.SendHeader(header, false)
- if sendErr != nil {
- return
- }
- }
- }()
-}
-
-// NoOpStreamHandler does nothing when a stream connects; it is most
-// likely used with RejectAuthHandler, which will not allow any
-// streams to make it to the stream handler.
-func NoOpStreamHandler(stream *Stream) {
- stream.SendReply(http.Header{}, false)
-}
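
handlers.go shows that a stream handler is just a func(*Stream), so anything with that signature can be passed to Serve. A short sketch of a custom handler under that assumption; the logging behaviour and listen address are illustrative, not taken from the removed code:

```go
package main

import (
	"io"
	"net"
	"net/http"
	"os"

	"github.com/docker/spdystream"
)

// logStream accepts an incoming stream and copies its data to stdout,
// mirroring the shape of MirrorStreamHandler above.
func logStream(stream *spdystream.Stream) {
	if err := stream.SendReply(http.Header{}, false); err != nil {
		return
	}
	go func() {
		defer stream.Close()
		io.Copy(os.Stdout, stream)
	}()
}

func main() {
	listener, err := net.Listen("tcp", "localhost:8080")
	if err != nil {
		panic(err)
	}
	for {
		conn, err := listener.Accept()
		if err != nil {
			panic(err)
		}
		spdyConn, err := spdystream.NewConnection(conn, true)
		if err != nil {
			panic(err)
		}
		// Serve takes any func(*spdystream.Stream), so custom handlers
		// drop in exactly like the built-in ones.
		go spdyConn.Serve(logStream)
	}
}
```
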
diff --git a/vendor/github.com/docker/spdystream/priority.go b/vendor/github.com/docker/spdystream/priority.go
deleted file mode 100644
index fc8582b5c..000000000
--- a/vendor/github.com/docker/spdystream/priority.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package spdystream
-
-import (
- "container/heap"
- "sync"
-
- "github.com/docker/spdystream/spdy"
-)
-
-type prioritizedFrame struct {
- frame spdy.Frame
- priority uint8
- insertId uint64
-}
-
-type frameQueue []*prioritizedFrame
-
-func (fq frameQueue) Len() int {
- return len(fq)
-}
-
-func (fq frameQueue) Less(i, j int) bool {
- if fq[i].priority == fq[j].priority {
- return fq[i].insertId < fq[j].insertId
- }
- return fq[i].priority < fq[j].priority
-}
-
-func (fq frameQueue) Swap(i, j int) {
- fq[i], fq[j] = fq[j], fq[i]
-}
-
-func (fq *frameQueue) Push(x interface{}) {
- *fq = append(*fq, x.(*prioritizedFrame))
-}
-
-func (fq *frameQueue) Pop() interface{} {
- old := *fq
- n := len(old)
- *fq = old[0 : n-1]
- return old[n-1]
-}
-
-type PriorityFrameQueue struct {
- queue *frameQueue
- c *sync.Cond
- size int
- nextInsertId uint64
- drain bool
-}
-
-func NewPriorityFrameQueue(size int) *PriorityFrameQueue {
- queue := make(frameQueue, 0, size)
- heap.Init(&queue)
-
- return &PriorityFrameQueue{
- queue: &queue,
- size: size,
- c: sync.NewCond(&sync.Mutex{}),
- }
-}
-
-func (q *PriorityFrameQueue) Push(frame spdy.Frame, priority uint8) {
- q.c.L.Lock()
- defer q.c.L.Unlock()
- for q.queue.Len() >= q.size {
- q.c.Wait()
- }
- pFrame := &prioritizedFrame{
- frame: frame,
- priority: priority,
- insertId: q.nextInsertId,
- }
- q.nextInsertId = q.nextInsertId + 1
- heap.Push(q.queue, pFrame)
- q.c.Signal()
-}
-
-func (q *PriorityFrameQueue) Pop() spdy.Frame {
- q.c.L.Lock()
- defer q.c.L.Unlock()
- for q.queue.Len() == 0 {
- if q.drain {
- return nil
- }
- q.c.Wait()
- }
- frame := heap.Pop(q.queue).(*prioritizedFrame).frame
- q.c.Signal()
- return frame
-}
-
-func (q *PriorityFrameQueue) Drain() {
- q.c.L.Lock()
- defer q.c.L.Unlock()
- q.drain = true
- q.c.Broadcast()
-}
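
PriorityFrameQueue is what Serve uses to keep per-partition ordering while still favouring important frames: Less treats a lower priority value as more urgent and breaks ties by insertion order, and Drain lets a consumer blocked in Pop return nil once the queue empties. A tiny standalone sketch of that behaviour, reusing spdy.PingFrame as a convenient Frame implementation:

```go
package main

import (
	"fmt"

	"github.com/docker/spdystream"
	"github.com/docker/spdystream/spdy"
)

func main() {
	q := spdystream.NewPriorityFrameQueue(10)

	// Lower priority values are served first; insertion order breaks ties.
	q.Push(&spdy.PingFrame{Id: 1}, 7)
	q.Push(&spdy.PingFrame{Id: 2}, 0)
	q.Push(&spdy.PingFrame{Id: 3}, 7)

	// After Drain, Pop hands out the remaining frames and then returns nil.
	q.Drain()

	for frame := q.Pop(); frame != nil; frame = q.Pop() {
		fmt.Println("popped ping", frame.(*spdy.PingFrame).Id)
	}
}
```

Given the priorities above, the pings pop in the order 2, 1, 3 before the loop ends.
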
diff --git a/vendor/github.com/docker/spdystream/spdy/dictionary.go b/vendor/github.com/docker/spdystream/spdy/dictionary.go
deleted file mode 100644
index 5a5ff0e14..000000000
--- a/vendor/github.com/docker/spdystream/spdy/dictionary.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package spdy
-
-// headerDictionary is the dictionary sent to the zlib compressor/decompressor.
-var headerDictionary = []byte{
- 0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68,
- 0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70,
- 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70,
- 0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65,
- 0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05,
- 0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00,
- 0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00,
- 0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70,
- 0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65,
- 0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63,
- 0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f,
- 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f,
- 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c,
- 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00,
- 0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70,
- 0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73,
- 0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00,
- 0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
- 0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68,
- 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63,
- 0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f,
- 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74,
- 0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65,
- 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74,
- 0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f,
- 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10,
- 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d,
- 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
- 0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74,
- 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67,
- 0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f,
- 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00,
- 0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
- 0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00,
- 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
- 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00,
- 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
- 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00,
- 0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00,
- 0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00,
- 0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74,
- 0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69,
- 0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66,
- 0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68,
- 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69,
- 0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00,
- 0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f,
- 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73,
- 0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d,
- 0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d,
- 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00,
- 0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67,
- 0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d,
- 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69,
- 0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65,
- 0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74,
- 0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65,
- 0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00,
- 0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72,
- 0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00,
- 0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00,
- 0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79,
- 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74,
- 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00,
- 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61,
- 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05,
- 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00,
- 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72,
- 0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72,
- 0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00,
- 0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65,
- 0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00,
- 0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c,
- 0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72,
- 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65,
- 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00,
- 0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61,
- 0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73,
- 0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74,
- 0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79,
- 0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00,
- 0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69,
- 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77,
- 0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e,
- 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00,
- 0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
- 0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00,
- 0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30,
- 0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76,
- 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00,
- 0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31,
- 0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72,
- 0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62,
- 0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73,
- 0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69,
- 0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65,
- 0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00,
- 0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69,
- 0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32,
- 0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35,
- 0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30,
- 0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33,
- 0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37,
- 0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30,
- 0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34,
- 0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31,
- 0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31,
- 0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34,
- 0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34,
- 0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e,
- 0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f,
- 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65,
- 0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20,
- 0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65,
- 0x6e, 0x74, 0x33, 0x30, 0x31, 0x20, 0x4d, 0x6f,
- 0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d,
- 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34,
- 0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30,
- 0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68,
- 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30,
- 0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64,
- 0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e,
- 0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64,
- 0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65,
- 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72,
- 0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f,
- 0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74,
- 0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65,
- 0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20,
- 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20,
- 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61,
- 0x62, 0x6c, 0x65, 0x4a, 0x61, 0x6e, 0x20, 0x46,
- 0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41,
- 0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a,
- 0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41,
- 0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20,
- 0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20,
- 0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30,
- 0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e,
- 0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57,
- 0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c,
- 0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61,
- 0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20,
- 0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b,
- 0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f,
- 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61,
- 0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69,
- 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67,
- 0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67,
- 0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78,
- 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78,
- 0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c,
- 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c,
- 0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74,
- 0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c,
- 0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74,
- 0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65,
- 0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65,
- 0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64,
- 0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65,
- 0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63,
- 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69,
- 0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d,
- 0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a,
- 0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e,
-}
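
The file removed above embeds the SPDY/3 zlib header dictionary as a raw byte array; SPDY compresses every header block against this shared preset dictionary so that short, repetitive headers still compress well. A minimal sketch of the same technique with compress/zlib follows — the dictionary string here is an illustrative stand-in, not the real SPDY dictionary.

package main

import (
    "bytes"
    "compress/zlib"
    "fmt"
    "io"
    "io/ioutil"
)

func main() {
    // Both ends must agree on the preset dictionary ahead of time.
    dict := []byte("content-typecontent-lengthaccept-encoding") // stand-in dictionary

    var buf bytes.Buffer
    w, err := zlib.NewWriterLevelDict(&buf, zlib.BestCompression, dict)
    if err != nil {
        panic(err)
    }
    io.WriteString(w, "content-type: text/html\n")
    w.Close()

    // The reader needs the identical dictionary to decompress.
    r, err := zlib.NewReaderDict(&buf, dict)
    if err != nil {
        panic(err)
    }
    plain, _ := ioutil.ReadAll(r)
    fmt.Printf("%s", plain)
}
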
diff --git a/vendor/github.com/docker/spdystream/spdy/read.go b/vendor/github.com/docker/spdystream/spdy/read.go
deleted file mode 100644
index 9359a9501..000000000
--- a/vendor/github.com/docker/spdystream/spdy/read.go
+++ /dev/null
@@ -1,348 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package spdy
-
-import (
- "compress/zlib"
- "encoding/binary"
- "io"
- "net/http"
- "strings"
-)
-
-func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error {
- return f.readSynStreamFrame(h, frame)
-}
-
-func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error {
- return f.readSynReplyFrame(h, frame)
-}
-
-func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error {
- frame.CFHeader = h
- if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
- return err
- }
- if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
- return err
- }
- if frame.Status == 0 {
- return &Error{InvalidControlFrame, frame.StreamId}
- }
- if frame.StreamId == 0 {
- return &Error{ZeroStreamId, 0}
- }
- return nil
-}
-
-func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error {
- frame.CFHeader = h
- var numSettings uint32
- if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil {
- return err
- }
- frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings)
- for i := uint32(0); i < numSettings; i++ {
- if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil {
- return err
- }
- frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24)
- frame.FlagIdValues[i].Id &= 0xffffff
- if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error {
- frame.CFHeader = h
- if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil {
- return err
- }
- if frame.Id == 0 {
- return &Error{ZeroStreamId, 0}
- }
- if frame.CFHeader.Flags != 0 {
- return &Error{InvalidControlFrame, StreamId(frame.Id)}
- }
- return nil
-}
-
-func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) error {
- frame.CFHeader = h
- if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil {
- return err
- }
- if frame.CFHeader.Flags != 0 {
- return &Error{InvalidControlFrame, frame.LastGoodStreamId}
- }
- if frame.CFHeader.length != 8 {
- return &Error{InvalidControlFrame, frame.LastGoodStreamId}
- }
- if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
- return err
- }
- return nil
-}
-
-func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error {
- return f.readHeadersFrame(h, frame)
-}
-
-func (frame *WindowUpdateFrame) read(h ControlFrameHeader, f *Framer) error {
- frame.CFHeader = h
- if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
- return err
- }
- if frame.CFHeader.Flags != 0 {
- return &Error{InvalidControlFrame, frame.StreamId}
- }
- if frame.CFHeader.length != 8 {
- return &Error{InvalidControlFrame, frame.StreamId}
- }
- if err := binary.Read(f.r, binary.BigEndian, &frame.DeltaWindowSize); err != nil {
- return err
- }
- return nil
-}
-
-func newControlFrame(frameType ControlFrameType) (controlFrame, error) {
- ctor, ok := cframeCtor[frameType]
- if !ok {
- return nil, &Error{Err: InvalidControlFrame}
- }
- return ctor(), nil
-}
-
-var cframeCtor = map[ControlFrameType]func() controlFrame{
- TypeSynStream: func() controlFrame { return new(SynStreamFrame) },
- TypeSynReply: func() controlFrame { return new(SynReplyFrame) },
- TypeRstStream: func() controlFrame { return new(RstStreamFrame) },
- TypeSettings: func() controlFrame { return new(SettingsFrame) },
- TypePing: func() controlFrame { return new(PingFrame) },
- TypeGoAway: func() controlFrame { return new(GoAwayFrame) },
- TypeHeaders: func() controlFrame { return new(HeadersFrame) },
- TypeWindowUpdate: func() controlFrame { return new(WindowUpdateFrame) },
-}
-
-func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error {
- if f.headerDecompressor != nil {
- f.headerReader.N = payloadSize
- return nil
- }
- f.headerReader = io.LimitedReader{R: f.r, N: payloadSize}
- decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(headerDictionary))
- if err != nil {
- return err
- }
- f.headerDecompressor = decompressor
- return nil
-}
-
-// ReadFrame reads SPDY encoded data and returns a decompressed Frame.
-func (f *Framer) ReadFrame() (Frame, error) {
- var firstWord uint32
- if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil {
- return nil, err
- }
- if firstWord&0x80000000 != 0 {
- frameType := ControlFrameType(firstWord & 0xffff)
- version := uint16(firstWord >> 16 & 0x7fff)
- return f.parseControlFrame(version, frameType)
- }
- return f.parseDataFrame(StreamId(firstWord & 0x7fffffff))
-}
-
-func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) {
- var length uint32
- if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
- return nil, err
- }
- flags := ControlFlags((length & 0xff000000) >> 24)
- length &= 0xffffff
- header := ControlFrameHeader{version, frameType, flags, length}
- cframe, err := newControlFrame(frameType)
- if err != nil {
- return nil, err
- }
- if err = cframe.read(header, f); err != nil {
- return nil, err
- }
- return cframe, nil
-}
-
-func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) {
- var numHeaders uint32
- if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil {
- return nil, err
- }
- var e error
- h := make(http.Header, int(numHeaders))
- for i := 0; i < int(numHeaders); i++ {
- var length uint32
- if err := binary.Read(r, binary.BigEndian, &length); err != nil {
- return nil, err
- }
- nameBytes := make([]byte, length)
- if _, err := io.ReadFull(r, nameBytes); err != nil {
- return nil, err
- }
- name := string(nameBytes)
- if name != strings.ToLower(name) {
- e = &Error{UnlowercasedHeaderName, streamId}
- name = strings.ToLower(name)
- }
- if h[name] != nil {
- e = &Error{DuplicateHeaders, streamId}
- }
- if err := binary.Read(r, binary.BigEndian, &length); err != nil {
- return nil, err
- }
- value := make([]byte, length)
- if _, err := io.ReadFull(r, value); err != nil {
- return nil, err
- }
- valueList := strings.Split(string(value), headerValueSeparator)
- for _, v := range valueList {
- h.Add(name, v)
- }
- }
- if e != nil {
- return h, e
- }
- return h, nil
-}
-
-func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error {
- frame.CFHeader = h
- var err error
- if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
- return err
- }
- if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil {
- return err
- }
- if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil {
- return err
- }
- frame.Priority >>= 5
- if err = binary.Read(f.r, binary.BigEndian, &frame.Slot); err != nil {
- return err
- }
- reader := f.r
- if !f.headerCompressionDisabled {
- err := f.uncorkHeaderDecompressor(int64(h.length - 10))
- if err != nil {
- return err
- }
- reader = f.headerDecompressor
- }
- frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
- if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
- err = &Error{WrongCompressedPayloadSize, 0}
- }
- if err != nil {
- return err
- }
- for h := range frame.Headers {
- if invalidReqHeaders[h] {
- return &Error{InvalidHeaderPresent, frame.StreamId}
- }
- }
- if frame.StreamId == 0 {
- return &Error{ZeroStreamId, 0}
- }
- return nil
-}
-
-func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error {
- frame.CFHeader = h
- var err error
- if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
- return err
- }
- reader := f.r
- if !f.headerCompressionDisabled {
- err := f.uncorkHeaderDecompressor(int64(h.length - 4))
- if err != nil {
- return err
- }
- reader = f.headerDecompressor
- }
- frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
- if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
- err = &Error{WrongCompressedPayloadSize, 0}
- }
- if err != nil {
- return err
- }
- for h := range frame.Headers {
- if invalidRespHeaders[h] {
- return &Error{InvalidHeaderPresent, frame.StreamId}
- }
- }
- if frame.StreamId == 0 {
- return &Error{ZeroStreamId, 0}
- }
- return nil
-}
-
-func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error {
- frame.CFHeader = h
- var err error
- if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
- return err
- }
- reader := f.r
- if !f.headerCompressionDisabled {
- err := f.uncorkHeaderDecompressor(int64(h.length - 4))
- if err != nil {
- return err
- }
- reader = f.headerDecompressor
- }
- frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
- if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
- err = &Error{WrongCompressedPayloadSize, 0}
- }
- if err != nil {
- return err
- }
- var invalidHeaders map[string]bool
- if frame.StreamId%2 == 0 {
- invalidHeaders = invalidReqHeaders
- } else {
- invalidHeaders = invalidRespHeaders
- }
- for h := range frame.Headers {
- if invalidHeaders[h] {
- return &Error{InvalidHeaderPresent, frame.StreamId}
- }
- }
- if frame.StreamId == 0 {
- return &Error{ZeroStreamId, 0}
- }
- return nil
-}
-
-func (f *Framer) parseDataFrame(streamId StreamId) (*DataFrame, error) {
- var length uint32
- if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
- return nil, err
- }
- var frame DataFrame
- frame.StreamId = streamId
- frame.Flags = DataFlags(length >> 24)
- length &= 0xffffff
- frame.Data = make([]byte, length)
- if _, err := io.ReadFull(f.r, frame.Data); err != nil {
- return nil, err
- }
- if frame.StreamId == 0 {
- return nil, &Error{ZeroStreamId, 0}
- }
- return &frame, nil
-}
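
ReadFrame above branches on the high bit of the first 32-bit word: if it is set, the word carries the SPDY version and control frame type; otherwise the word is the 31-bit stream id of a data frame. A standalone sketch of that dispatch, using an illustrative first word:

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    // First word of a SYN_STREAM control frame: control bit set, version 3, type 0x0001.
    raw := []byte{0x80, 0x03, 0x00, 0x01}
    first := binary.BigEndian.Uint32(raw)

    if first&0x80000000 != 0 {
        version := uint16(first >> 16 & 0x7fff)
        frameType := first & 0xffff
        fmt.Printf("control frame: version=%d type=%#04x\n", version, frameType)
    } else {
        fmt.Printf("data frame: stream id=%d\n", first&0x7fffffff)
    }
}
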
diff --git a/vendor/github.com/docker/spdystream/spdy/types.go b/vendor/github.com/docker/spdystream/spdy/types.go
deleted file mode 100644
index 7b6ee9c6f..000000000
--- a/vendor/github.com/docker/spdystream/spdy/types.go
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package spdy implements the SPDY protocol (currently SPDY/3), described in
-// http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3.
-package spdy
-
-import (
- "bytes"
- "compress/zlib"
- "io"
- "net/http"
-)
-
-// Version is the protocol version number that this package implements.
-const Version = 3
-
-// ControlFrameType stores the type field in a control frame header.
-type ControlFrameType uint16
-
-const (
- TypeSynStream ControlFrameType = 0x0001
- TypeSynReply = 0x0002
- TypeRstStream = 0x0003
- TypeSettings = 0x0004
- TypePing = 0x0006
- TypeGoAway = 0x0007
- TypeHeaders = 0x0008
- TypeWindowUpdate = 0x0009
-)
-
-// ControlFlags are the flags that can be set on a control frame.
-type ControlFlags uint8
-
-const (
- ControlFlagFin ControlFlags = 0x01
- ControlFlagUnidirectional = 0x02
- ControlFlagSettingsClearSettings = 0x01
-)
-
-// DataFlags are the flags that can be set on a data frame.
-type DataFlags uint8
-
-const (
- DataFlagFin DataFlags = 0x01
-)
-
-// MaxDataLength is the maximum number of bytes that can be stored in one frame.
-const MaxDataLength = 1<<24 - 1
-
-// headerValueSepator separates multiple header values.
-const headerValueSeparator = "\x00"
-
-// Frame is a single SPDY frame in its unpacked in-memory representation. Use
-// Framer to read and write it.
-type Frame interface {
- write(f *Framer) error
-}
-
-// ControlFrameHeader contains all the fields in a control frame header,
-// in its unpacked in-memory representation.
-type ControlFrameHeader struct {
- // Note, high bit is the "Control" bit.
- version uint16 // spdy version number
- frameType ControlFrameType
- Flags ControlFlags
- length uint32 // length of data field
-}
-
-type controlFrame interface {
- Frame
- read(h ControlFrameHeader, f *Framer) error
-}
-
-// StreamId represents a 31-bit value identifying the stream.
-type StreamId uint32
-
-// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM
-// frame.
-type SynStreamFrame struct {
- CFHeader ControlFrameHeader
- StreamId StreamId
- AssociatedToStreamId StreamId // stream id for a stream which this stream is associated to
- Priority uint8 // priority of this frame (3-bit)
- Slot uint8 // index in the server's credential vector of the client certificate
- Headers http.Header
-}
-
-// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame.
-type SynReplyFrame struct {
- CFHeader ControlFrameHeader
- StreamId StreamId
- Headers http.Header
-}
-
-// RstStreamStatus represents the status that led to a RST_STREAM.
-type RstStreamStatus uint32
-
-const (
- ProtocolError RstStreamStatus = iota + 1
- InvalidStream
- RefusedStream
- UnsupportedVersion
- Cancel
- InternalError
- FlowControlError
- StreamInUse
- StreamAlreadyClosed
- InvalidCredentials
- FrameTooLarge
-)
-
-// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM
-// frame.
-type RstStreamFrame struct {
- CFHeader ControlFrameHeader
- StreamId StreamId
- Status RstStreamStatus
-}
-
-// SettingsFlag represents a flag in a SETTINGS frame.
-type SettingsFlag uint8
-
-const (
- FlagSettingsPersistValue SettingsFlag = 0x1
- FlagSettingsPersisted = 0x2
-)
-
-// SettingsFlag represents the id of an id/value pair in a SETTINGS frame.
-type SettingsId uint32
-
-const (
- SettingsUploadBandwidth SettingsId = iota + 1
- SettingsDownloadBandwidth
- SettingsRoundTripTime
- SettingsMaxConcurrentStreams
- SettingsCurrentCwnd
- SettingsDownloadRetransRate
- SettingsInitialWindowSize
- SettingsClientCretificateVectorSize
-)
-
-// SettingsFlagIdValue is the unpacked, in-memory representation of the
-// combined flag/id/value for a setting in a SETTINGS frame.
-type SettingsFlagIdValue struct {
- Flag SettingsFlag
- Id SettingsId
- Value uint32
-}
-
-// SettingsFrame is the unpacked, in-memory representation of a SPDY
-// SETTINGS frame.
-type SettingsFrame struct {
- CFHeader ControlFrameHeader
- FlagIdValues []SettingsFlagIdValue
-}
-
-// PingFrame is the unpacked, in-memory representation of a PING frame.
-type PingFrame struct {
- CFHeader ControlFrameHeader
- Id uint32 // unique id for this ping, from server is even, from client is odd.
-}
-
-// GoAwayStatus represents the status in a GoAwayFrame.
-type GoAwayStatus uint32
-
-const (
- GoAwayOK GoAwayStatus = iota
- GoAwayProtocolError
- GoAwayInternalError
-)
-
-// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame.
-type GoAwayFrame struct {
- CFHeader ControlFrameHeader
- LastGoodStreamId StreamId // last stream id which was accepted by sender
- Status GoAwayStatus
-}
-
-// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame.
-type HeadersFrame struct {
- CFHeader ControlFrameHeader
- StreamId StreamId
- Headers http.Header
-}
-
-// WindowUpdateFrame is the unpacked, in-memory representation of a
-// WINDOW_UPDATE frame.
-type WindowUpdateFrame struct {
- CFHeader ControlFrameHeader
- StreamId StreamId
- DeltaWindowSize uint32 // additional number of bytes to existing window size
-}
-
-// TODO: Implement credential frame and related methods.
-
-// DataFrame is the unpacked, in-memory representation of a DATA frame.
-type DataFrame struct {
- // Note, high bit is the "Control" bit. Should be 0 for data frames.
- StreamId StreamId
- Flags DataFlags
- Data []byte // payload data of this frame
-}
-
-// A SPDY specific error.
-type ErrorCode string
-
-const (
- UnlowercasedHeaderName ErrorCode = "header was not lowercased"
- DuplicateHeaders = "multiple headers with same name"
- WrongCompressedPayloadSize = "compressed payload size was incorrect"
- UnknownFrameType = "unknown frame type"
- InvalidControlFrame = "invalid control frame"
- InvalidDataFrame = "invalid data frame"
- InvalidHeaderPresent = "frame contained invalid header"
- ZeroStreamId = "stream id zero is disallowed"
-)
-
-// Error contains both the type of error and additional values. StreamId is 0
-// if Error is not associated with a stream.
-type Error struct {
- Err ErrorCode
- StreamId StreamId
-}
-
-func (e *Error) Error() string {
- return string(e.Err)
-}
-
-var invalidReqHeaders = map[string]bool{
- "Connection": true,
- "Host": true,
- "Keep-Alive": true,
- "Proxy-Connection": true,
- "Transfer-Encoding": true,
-}
-
-var invalidRespHeaders = map[string]bool{
- "Connection": true,
- "Keep-Alive": true,
- "Proxy-Connection": true,
- "Transfer-Encoding": true,
-}
-
-// Framer handles serializing/deserializing SPDY frames, including compressing/
-// decompressing payloads.
-type Framer struct {
- headerCompressionDisabled bool
- w io.Writer
- headerBuf *bytes.Buffer
- headerCompressor *zlib.Writer
- r io.Reader
- headerReader io.LimitedReader
- headerDecompressor io.ReadCloser
-}
-
-// NewFramer allocates a new Framer for a given SPDY connection, represented by
-// a io.Writer and io.Reader. Note that Framer will read and write individual fields
-// from/to the Reader and Writer, so the caller should pass in an appropriately
-// buffered implementation to optimize performance.
-func NewFramer(w io.Writer, r io.Reader) (*Framer, error) {
- compressBuf := new(bytes.Buffer)
- compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary))
- if err != nil {
- return nil, err
- }
- framer := &Framer{
- w: w,
- headerBuf: compressBuf,
- headerCompressor: compressor,
- r: r,
- }
- return framer, nil
-}
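
A quick round trip through the Framer API this commit removes, writing and reading a PING frame over a single bytes.Buffer (PING is convenient because it carries no compressed header block); a sketch against the deleted package, shown only to illustrate the API above:

package main

import (
    "bytes"
    "fmt"

    "github.com/docker/spdystream/spdy"
)

func main() {
    var buf bytes.Buffer
    framer, err := spdy.NewFramer(&buf, &buf)
    if err != nil {
        panic(err)
    }
    if err := framer.WriteFrame(&spdy.PingFrame{Id: 1}); err != nil {
        panic(err)
    }
    frame, err := framer.ReadFrame()
    if err != nil {
        panic(err)
    }
    ping := frame.(*spdy.PingFrame)
    fmt.Println("ping id:", ping.Id) // ping id: 1
}
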
diff --git a/vendor/github.com/docker/spdystream/spdy/write.go b/vendor/github.com/docker/spdystream/spdy/write.go
deleted file mode 100644
index b212f66a2..000000000
--- a/vendor/github.com/docker/spdystream/spdy/write.go
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package spdy
-
-import (
- "encoding/binary"
- "io"
- "net/http"
- "strings"
-)
-
-func (frame *SynStreamFrame) write(f *Framer) error {
- return f.writeSynStreamFrame(frame)
-}
-
-func (frame *SynReplyFrame) write(f *Framer) error {
- return f.writeSynReplyFrame(frame)
-}
-
-func (frame *RstStreamFrame) write(f *Framer) (err error) {
- if frame.StreamId == 0 {
- return &Error{ZeroStreamId, 0}
- }
- frame.CFHeader.version = Version
- frame.CFHeader.frameType = TypeRstStream
- frame.CFHeader.Flags = 0
- frame.CFHeader.length = 8
-
- // Serialize frame to Writer.
- if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
- return
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
- return
- }
- if frame.Status == 0 {
- return &Error{InvalidControlFrame, frame.StreamId}
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil {
- return
- }
- return
-}
-
-func (frame *SettingsFrame) write(f *Framer) (err error) {
- frame.CFHeader.version = Version
- frame.CFHeader.frameType = TypeSettings
- frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4)
-
- // Serialize frame to Writer.
- if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
- return
- }
- if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil {
- return
- }
- for _, flagIdValue := range frame.FlagIdValues {
- flagId := uint32(flagIdValue.Flag)<<24 | uint32(flagIdValue.Id)
- if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil {
- return
- }
- if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil {
- return
- }
- }
- return
-}
-
-func (frame *PingFrame) write(f *Framer) (err error) {
- if frame.Id == 0 {
- return &Error{ZeroStreamId, 0}
- }
- frame.CFHeader.version = Version
- frame.CFHeader.frameType = TypePing
- frame.CFHeader.Flags = 0
- frame.CFHeader.length = 4
-
- // Serialize frame to Writer.
- if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
- return
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil {
- return
- }
- return
-}
-
-func (frame *GoAwayFrame) write(f *Framer) (err error) {
- frame.CFHeader.version = Version
- frame.CFHeader.frameType = TypeGoAway
- frame.CFHeader.Flags = 0
- frame.CFHeader.length = 8
-
- // Serialize frame to Writer.
- if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
- return
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil {
- return
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil {
- return
- }
- return nil
-}
-
-func (frame *HeadersFrame) write(f *Framer) error {
- return f.writeHeadersFrame(frame)
-}
-
-func (frame *WindowUpdateFrame) write(f *Framer) (err error) {
- frame.CFHeader.version = Version
- frame.CFHeader.frameType = TypeWindowUpdate
- frame.CFHeader.Flags = 0
- frame.CFHeader.length = 8
-
- // Serialize frame to Writer.
- if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
- return
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
- return
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.DeltaWindowSize); err != nil {
- return
- }
- return nil
-}
-
-func (frame *DataFrame) write(f *Framer) error {
- return f.writeDataFrame(frame)
-}
-
-// WriteFrame writes a frame.
-func (f *Framer) WriteFrame(frame Frame) error {
- return frame.write(f)
-}
-
-func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error {
- if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil {
- return err
- }
- if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil {
- return err
- }
- flagsAndLength := uint32(h.Flags)<<24 | h.length
- if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil {
- return err
- }
- return nil
-}
-
-func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) {
- n = 0
- if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil {
- return
- }
- n += 2
- for name, values := range h {
- if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil {
- return
- }
- n += 2
- name = strings.ToLower(name)
- if _, err = io.WriteString(w, name); err != nil {
- return
- }
- n += len(name)
- v := strings.Join(values, headerValueSeparator)
- if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil {
- return
- }
- n += 2
- if _, err = io.WriteString(w, v); err != nil {
- return
- }
- n += len(v)
- }
- return
-}
-
-func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) {
- if frame.StreamId == 0 {
- return &Error{ZeroStreamId, 0}
- }
- // Marshal the headers.
- var writer io.Writer = f.headerBuf
- if !f.headerCompressionDisabled {
- writer = f.headerCompressor
- }
- if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
- return
- }
- if !f.headerCompressionDisabled {
- f.headerCompressor.Flush()
- }
-
- // Set ControlFrameHeader.
- frame.CFHeader.version = Version
- frame.CFHeader.frameType = TypeSynStream
- frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10)
-
- // Serialize frame to Writer.
- if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
- return err
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
- return err
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil {
- return err
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<5); err != nil {
- return err
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.Slot); err != nil {
- return err
- }
- if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
- return err
- }
- f.headerBuf.Reset()
- return nil
-}
-
-func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) {
- if frame.StreamId == 0 {
- return &Error{ZeroStreamId, 0}
- }
- // Marshal the headers.
- var writer io.Writer = f.headerBuf
- if !f.headerCompressionDisabled {
- writer = f.headerCompressor
- }
- if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
- return
- }
- if !f.headerCompressionDisabled {
- f.headerCompressor.Flush()
- }
-
- // Set ControlFrameHeader.
- frame.CFHeader.version = Version
- frame.CFHeader.frameType = TypeSynReply
- frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4)
-
- // Serialize frame to Writer.
- if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
- return
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
- return
- }
- if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
- return
- }
- f.headerBuf.Reset()
- return
-}
-
-func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) {
- if frame.StreamId == 0 {
- return &Error{ZeroStreamId, 0}
- }
- // Marshal the headers.
- var writer io.Writer = f.headerBuf
- if !f.headerCompressionDisabled {
- writer = f.headerCompressor
- }
- if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
- return
- }
- if !f.headerCompressionDisabled {
- f.headerCompressor.Flush()
- }
-
- // Set ControlFrameHeader.
- frame.CFHeader.version = Version
- frame.CFHeader.frameType = TypeHeaders
- frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4)
-
- // Serialize frame to Writer.
- if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
- return
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
- return
- }
- if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
- return
- }
- f.headerBuf.Reset()
- return
-}
-
-func (f *Framer) writeDataFrame(frame *DataFrame) (err error) {
- if frame.StreamId == 0 {
- return &Error{ZeroStreamId, 0}
- }
- if frame.StreamId&0x80000000 != 0 || len(frame.Data) > MaxDataLength {
- return &Error{InvalidDataFrame, frame.StreamId}
- }
-
- // Serialize frame to Writer.
- if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
- return
- }
- flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data))
- if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil {
- return
- }
- if _, err = f.w.Write(frame.Data); err != nil {
- return
- }
- return nil
-}
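
For reference, writeControlFrameHeader above lays out the 8-byte control frame header as 0x8000|version, then the frame type, then flags<<24|length, all big-endian. A small sketch that builds the same bytes directly (values are illustrative):

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

func main() {
    var (
        version   uint16 = 3      // SPDY/3
        frameType uint16 = 0x0006 // PING
        flags     uint8  = 0
        length    uint32 = 4 // a PING payload is its 4-byte id
    )

    var buf bytes.Buffer
    binary.Write(&buf, binary.BigEndian, 0x8000|version)           // control bit + version
    binary.Write(&buf, binary.BigEndian, frameType)                // frame type
    binary.Write(&buf, binary.BigEndian, uint32(flags)<<24|length) // flags + 24-bit length
    fmt.Printf("% x\n", buf.Bytes()) // 80 03 00 06 00 00 00 04
}
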
diff --git a/vendor/github.com/docker/spdystream/stream.go b/vendor/github.com/docker/spdystream/stream.go
deleted file mode 100644
index f9e9ee267..000000000
--- a/vendor/github.com/docker/spdystream/stream.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package spdystream
-
-import (
- "errors"
- "fmt"
- "io"
- "net"
- "net/http"
- "sync"
- "time"
-
- "github.com/docker/spdystream/spdy"
-)
-
-var (
- ErrUnreadPartialData = errors.New("unread partial data")
-)
-
-type Stream struct {
- streamId spdy.StreamId
- parent *Stream
- conn *Connection
- startChan chan error
-
- dataLock sync.RWMutex
- dataChan chan []byte
- unread []byte
-
- priority uint8
- headers http.Header
- headerChan chan http.Header
- finishLock sync.Mutex
- finished bool
- replyCond *sync.Cond
- replied bool
- closeLock sync.Mutex
- closeChan chan bool
-}
-
-// WriteData writes data to stream, sending a dataframe per call
-func (s *Stream) WriteData(data []byte, fin bool) error {
- s.waitWriteReply()
- var flags spdy.DataFlags
-
- if fin {
- flags = spdy.DataFlagFin
- s.finishLock.Lock()
- if s.finished {
- s.finishLock.Unlock()
- return ErrWriteClosedStream
- }
- s.finished = true
- s.finishLock.Unlock()
- }
-
- dataFrame := &spdy.DataFrame{
- StreamId: s.streamId,
- Flags: flags,
- Data: data,
- }
-
- debugMessage("(%p) (%d) Writing data frame", s, s.streamId)
- return s.conn.framer.WriteFrame(dataFrame)
-}
-
-// Write writes bytes to a stream, calling write data for each call.
-func (s *Stream) Write(data []byte) (n int, err error) {
- err = s.WriteData(data, false)
- if err == nil {
- n = len(data)
- }
- return
-}
-
-// Read reads bytes from a stream, a single read will never get more
-// than what is sent on a single data frame, but a multiple calls to
-// read may get data from the same data frame.
-func (s *Stream) Read(p []byte) (n int, err error) {
- if s.unread == nil {
- select {
- case <-s.closeChan:
- return 0, io.EOF
- case read, ok := <-s.dataChan:
- if !ok {
- return 0, io.EOF
- }
- s.unread = read
- }
- }
- n = copy(p, s.unread)
- if n < len(s.unread) {
- s.unread = s.unread[n:]
- } else {
- s.unread = nil
- }
- return
-}
-
-// ReadData reads an entire data frame and returns the byte array
-// from the data frame. If there is unread data from the result
-// of a Read call, this function will return an ErrUnreadPartialData.
-func (s *Stream) ReadData() ([]byte, error) {
- debugMessage("(%p) Reading data from %d", s, s.streamId)
- if s.unread != nil {
- return nil, ErrUnreadPartialData
- }
- select {
- case <-s.closeChan:
- return nil, io.EOF
- case read, ok := <-s.dataChan:
- if !ok {
- return nil, io.EOF
- }
- return read, nil
- }
-}
-
-func (s *Stream) waitWriteReply() {
- if s.replyCond != nil {
- s.replyCond.L.Lock()
- for !s.replied {
- s.replyCond.Wait()
- }
- s.replyCond.L.Unlock()
- }
-}
-
-// Wait waits for the stream to receive a reply.
-func (s *Stream) Wait() error {
- return s.WaitTimeout(time.Duration(0))
-}
-
-// WaitTimeout waits for the stream to receive a reply or for timeout.
-// When the timeout is reached, ErrTimeout will be returned.
-func (s *Stream) WaitTimeout(timeout time.Duration) error {
- var timeoutChan <-chan time.Time
- if timeout > time.Duration(0) {
- timeoutChan = time.After(timeout)
- }
-
- select {
- case err := <-s.startChan:
- if err != nil {
- return err
- }
- break
- case <-timeoutChan:
- return ErrTimeout
- }
- return nil
-}
-
-// Close closes the stream by sending an empty data frame with the
-// finish flag set, indicating this side is finished with the stream.
-func (s *Stream) Close() error {
- select {
- case <-s.closeChan:
- // Stream is now fully closed
- s.conn.removeStream(s)
- default:
- break
- }
- return s.WriteData([]byte{}, true)
-}
-
-// Reset sends a reset frame, putting the stream into the fully closed state.
-func (s *Stream) Reset() error {
- s.conn.removeStream(s)
- return s.resetStream()
-}
-
-func (s *Stream) resetStream() error {
- // Always call closeRemoteChannels, even if s.finished is already true.
- // This makes it so that stream.Close() followed by stream.Reset() allows
- // stream.Read() to unblock.
- s.closeRemoteChannels()
-
- s.finishLock.Lock()
- if s.finished {
- s.finishLock.Unlock()
- return nil
- }
- s.finished = true
- s.finishLock.Unlock()
-
- resetFrame := &spdy.RstStreamFrame{
- StreamId: s.streamId,
- Status: spdy.Cancel,
- }
- return s.conn.framer.WriteFrame(resetFrame)
-}
-
-// CreateSubStream creates a stream using the current as the parent
-func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) {
- return s.conn.CreateStream(headers, s, fin)
-}
-
-// SetPriority sets the stream priority, does not affect the
-// remote priority of this stream after Open has been called.
-// Valid values are 0 through 7, 0 being the highest priority
-// and 7 the lowest.
-func (s *Stream) SetPriority(priority uint8) {
- s.priority = priority
-}
-
-// SendHeader sends a header frame across the stream
-func (s *Stream) SendHeader(headers http.Header, fin bool) error {
- return s.conn.sendHeaders(headers, s, fin)
-}
-
-// SendReply sends a reply on a stream, only valid to be called once
-// when handling a new stream
-func (s *Stream) SendReply(headers http.Header, fin bool) error {
- if s.replyCond == nil {
- return errors.New("cannot reply on initiated stream")
- }
- s.replyCond.L.Lock()
- defer s.replyCond.L.Unlock()
- if s.replied {
- return nil
- }
-
- err := s.conn.sendReply(headers, s, fin)
- if err != nil {
- return err
- }
-
- s.replied = true
- s.replyCond.Broadcast()
- return nil
-}
-
-// Refuse sends a reset frame with the status refuse, only
-// valid to be called once when handling a new stream. This
-// may be used to indicate that a stream is not allowed
-// when http status codes are not being used.
-func (s *Stream) Refuse() error {
- if s.replied {
- return nil
- }
- s.replied = true
- return s.conn.sendReset(spdy.RefusedStream, s)
-}
-
-// Cancel sends a reset frame with the status canceled. This
-// can be used at any time by the creator of the Stream to
-// indicate the stream is no longer needed.
-func (s *Stream) Cancel() error {
- return s.conn.sendReset(spdy.Cancel, s)
-}
-
-// ReceiveHeader receives a header sent on the other side
-// of the stream. This function will block until a header
-// is received or stream is closed.
-func (s *Stream) ReceiveHeader() (http.Header, error) {
- select {
- case <-s.closeChan:
- break
- case header, ok := <-s.headerChan:
- if !ok {
- return nil, fmt.Errorf("header chan closed")
- }
- return header, nil
- }
- return nil, fmt.Errorf("stream closed")
-}
-
-// Parent returns the parent stream
-func (s *Stream) Parent() *Stream {
- return s.parent
-}
-
-// Headers returns the headers used to create the stream
-func (s *Stream) Headers() http.Header {
- return s.headers
-}
-
-// String returns the string version of stream using the
-// streamId to uniquely identify the stream
-func (s *Stream) String() string {
- return fmt.Sprintf("stream:%d", s.streamId)
-}
-
-// Identifier returns a 32 bit identifier for the stream
-func (s *Stream) Identifier() uint32 {
- return uint32(s.streamId)
-}
-
-// IsFinished returns whether the stream has finished
-// sending data
-func (s *Stream) IsFinished() bool {
- return s.finished
-}
-
-// Implement net.Conn interface
-
-func (s *Stream) LocalAddr() net.Addr {
- return s.conn.conn.LocalAddr()
-}
-
-func (s *Stream) RemoteAddr() net.Addr {
- return s.conn.conn.RemoteAddr()
-}
-
-// TODO set per stream values instead of connection-wide
-
-func (s *Stream) SetDeadline(t time.Time) error {
- return s.conn.conn.SetDeadline(t)
-}
-
-func (s *Stream) SetReadDeadline(t time.Time) error {
- return s.conn.conn.SetReadDeadline(t)
-}
-
-func (s *Stream) SetWriteDeadline(t time.Time) error {
- return s.conn.conn.SetWriteDeadline(t)
-}
-
-func (s *Stream) closeRemoteChannels() {
- s.closeLock.Lock()
- defer s.closeLock.Unlock()
- select {
- case <-s.closeChan:
- default:
- close(s.closeChan)
- }
-}
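
Streams are created from a spdystream Connection and then used much like net.Conn endpoints. A hedged usage sketch follows: CreateStream, Wait, Write, Read, and Close appear in the code above, while NewConnection, Serve, and NoOpStreamHandler are recalled from the package's README (also removed by this commit) and should be treated as assumptions, as should the server address.

package main

import (
    "fmt"
    "net"
    "net/http"

    "github.com/docker/spdystream"
)

func main() {
    nc, err := net.Dial("tcp", "127.0.0.1:8080") // assumes a spdystream server is listening and echoes data
    if err != nil {
        panic(err)
    }
    conn, err := spdystream.NewConnection(nc, false) // false: act as the client side
    if err != nil {
        panic(err)
    }
    go conn.Serve(spdystream.NoOpStreamHandler) // pump frames; ignore server-initiated streams

    stream, err := conn.CreateStream(http.Header{}, nil, false)
    if err != nil {
        panic(err)
    }
    if err := stream.Wait(); err != nil { // block until the SYN_REPLY arrives
        panic(err)
    }

    stream.Write([]byte("hello"))
    reply := make([]byte, 32)
    n, _ := stream.Read(reply)
    fmt.Printf("reply: %s\n", reply[:n])
    stream.Close()
}
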
diff --git a/vendor/github.com/docker/spdystream/utils.go b/vendor/github.com/docker/spdystream/utils.go
deleted file mode 100644
index 1b2c199a4..000000000
--- a/vendor/github.com/docker/spdystream/utils.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package spdystream
-
-import (
- "log"
- "os"
-)
-
-var (
- DEBUG = os.Getenv("DEBUG")
-)
-
-func debugMessage(fmt string, args ...interface{}) {
- if DEBUG != "" {
- log.Printf(fmt, args...)
- }
-}
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
deleted file mode 100644
index 37dc0cfdb..000000000
--- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
-package ctxhttp // import "golang.org/x/net/context/ctxhttp"
-
-import (
- "context"
- "io"
- "net/http"
- "net/url"
- "strings"
-)
-
-// Do sends an HTTP request with the provided http.Client and returns
-// an HTTP response.
-//
-// If the client is nil, http.DefaultClient is used.
-//
-// The provided ctx must be non-nil. If it is canceled or times out,
-// ctx.Err() will be returned.
-func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
- if client == nil {
- client = http.DefaultClient
- }
- resp, err := client.Do(req.WithContext(ctx))
- // If we got an error, and the context has been canceled,
- // the context's error is probably more useful.
- if err != nil {
- select {
- case <-ctx.Done():
- err = ctx.Err()
- default:
- }
- }
- return resp, err
-}
-
-// Get issues a GET request via the Do function.
-func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
- req, err := http.NewRequest("GET", url, nil)
- if err != nil {
- return nil, err
- }
- return Do(ctx, client, req)
-}
-
-// Head issues a HEAD request via the Do function.
-func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
- req, err := http.NewRequest("HEAD", url, nil)
- if err != nil {
- return nil, err
- }
- return Do(ctx, client, req)
-}
-
-// Post issues a POST request via the Do function.
-func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
- req, err := http.NewRequest("POST", url, body)
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", bodyType)
- return Do(ctx, client, req)
-}
-
-// PostForm issues a POST request via the Do function.
-func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
- return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
-}
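
The ctxhttp helpers are thin wrappers over net/http that make the request honor the passed context, substituting ctx.Err() when the context is the reason the request failed. A minimal sketch with a timeout (the URL is a placeholder):

package main

import (
    "context"
    "fmt"
    "io/ioutil"
    "net/http"
    "time"

    "golang.org/x/net/context/ctxhttp"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()

    resp, err := ctxhttp.Get(ctx, http.DefaultClient, "https://example.com/") // placeholder URL
    if err != nil {
        fmt.Println("request failed:", err) // context.DeadlineExceeded if the timeout fired first
        return
    }
    defer resp.Body.Close()
    body, _ := ioutil.ReadAll(resp.Body)
    fmt.Println("got", len(body), "bytes")
}
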
diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml
deleted file mode 100644
index fa139db22..000000000
--- a/vendor/golang.org/x/oauth2/.travis.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-language: go
-
-go:
- - tip
-
-install:
- - export GOPATH="$HOME/gopath"
- - mkdir -p "$GOPATH/src/golang.org/x"
- - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
- - go get -v -t -d golang.org/x/oauth2/...
-
-script:
- - go test -v golang.org/x/oauth2/...
diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS
deleted file mode 100644
index 15167cd74..000000000
--- a/vendor/golang.org/x/oauth2/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md
deleted file mode 100644
index dfbed62cf..000000000
--- a/vendor/golang.org/x/oauth2/CONTRIBUTING.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# Contributing to Go
-
-Go is an open source project.
-
-It is the work of hundreds of contributors. We appreciate your help!
-
-## Filing issues
-
-When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:
-
-1. What version of Go are you using (`go version`)?
-2. What operating system and processor architecture are you using?
-3. What did you do?
-4. What did you expect to see?
-5. What did you see instead?
-
-General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
-The gophers there will answer or ask you to file an issue if you've tripped over a bug.
-
-## Contributing code
-
-Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
-before sending patches.
-
-Unless otherwise noted, the Go source files are distributed under
-the BSD-style license found in the LICENSE file.
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS
deleted file mode 100644
index 1c4577e96..000000000
--- a/vendor/golang.org/x/oauth2/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE
deleted file mode 100644
index 6a66aea5e..000000000
--- a/vendor/golang.org/x/oauth2/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md
deleted file mode 100644
index 8cfd6063e..000000000
--- a/vendor/golang.org/x/oauth2/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# OAuth2 for Go
-
-[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
-[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2)
-
-oauth2 package contains a client implementation for OAuth 2.0 spec.
-
-## Installation
-
-~~~~
-go get golang.org/x/oauth2
-~~~~
-
-Or you can manually git clone the repository to
-`$(go env GOPATH)/src/golang.org/x/oauth2`.
-
-See godoc for further documentation and examples.
-
-* [godoc.org/golang.org/x/oauth2](https://godoc.org/golang.org/x/oauth2)
-* [godoc.org/golang.org/x/oauth2/google](https://godoc.org/golang.org/x/oauth2/google)
-
-## Policy for new packages
-
-We no longer accept new provider-specific packages in this repo if all
-they do is add a single endpoint variable. If you just want to add a
-single endpoint, add it to the
-[godoc.org/golang.org/x/oauth2/endpoints](https://godoc.org/golang.org/x/oauth2/endpoints)
-package.
-
-## Report Issues / Send Patches
-
-This repository uses Gerrit for code changes. To learn how to submit changes to
-this repository, see https://golang.org/doc/contribute.html.
-
-The main issue tracker for the oauth2 repository is located at
-https://github.com/golang/oauth2/issues.
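
For reference, the usual three-legged flow with this package looks roughly like the sketch below; the endpoint URLs, credentials, and scope are placeholders.

package main

import (
    "context"
    "fmt"

    "golang.org/x/oauth2"
)

func main() {
    conf := &oauth2.Config{
        ClientID:     "CLIENT_ID",     // placeholder
        ClientSecret: "CLIENT_SECRET", // placeholder
        RedirectURL:  "https://example.com/callback",
        Scopes:       []string{"profile"},
        Endpoint: oauth2.Endpoint{
            AuthURL:  "https://provider.example.com/oauth2/auth",
            TokenURL: "https://provider.example.com/oauth2/token",
        },
    }

    // Send the user here to approve access; the provider redirects back with ?code=...
    fmt.Println("visit:", conf.AuthCodeURL("state", oauth2.AccessTypeOffline))

    // Paste the returned code, exchange it for a token, and get an http.Client
    // that attaches and refreshes the token automatically.
    var code string
    fmt.Scan(&code)
    tok, err := conf.Exchange(context.Background(), code)
    if err != nil {
        panic(err)
    }
    client := conf.Client(context.Background(), tok)
    _ = client // use client.Get(...) against the provider's API
}
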
diff --git a/vendor/golang.org/x/oauth2/go.mod b/vendor/golang.org/x/oauth2/go.mod
deleted file mode 100644
index b34578155..000000000
--- a/vendor/golang.org/x/oauth2/go.mod
+++ /dev/null
@@ -1,10 +0,0 @@
-module golang.org/x/oauth2
-
-go 1.11
-
-require (
- cloud.google.com/go v0.34.0
- golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e
- golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect
- google.golang.org/appengine v1.4.0
-)
diff --git a/vendor/golang.org/x/oauth2/go.sum b/vendor/golang.org/x/oauth2/go.sum
deleted file mode 100644
index 6f0079b0d..000000000
--- a/vendor/golang.org/x/oauth2/go.sum
+++ /dev/null
@@ -1,12 +0,0 @@
-cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go
deleted file mode 100644
index 743487188..000000000
--- a/vendor/golang.org/x/oauth2/internal/client_appengine.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package internal
-
-import "google.golang.org/appengine/urlfetch"
-
-func init() {
- appengineClientHook = urlfetch.Client
-}
diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go
deleted file mode 100644
index 03265e888..000000000
--- a/vendor/golang.org/x/oauth2/internal/doc.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package internal contains support packages for oauth2 package.
-package internal
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go
deleted file mode 100644
index c0ab196cf..000000000
--- a/vendor/golang.org/x/oauth2/internal/oauth2.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package internal
-
-import (
- "crypto/rsa"
- "crypto/x509"
- "encoding/pem"
- "errors"
- "fmt"
-)
-
-// ParseKey converts the binary contents of a private key file
-// to an *rsa.PrivateKey. It detects whether the private key is in a
-// PEM container or not. If so, it extracts the the private key
-// from PEM container before conversion. It only supports PEM
-// containers with no passphrase.
-func ParseKey(key []byte) (*rsa.PrivateKey, error) {
- block, _ := pem.Decode(key)
- if block != nil {
- key = block.Bytes
- }
- parsedKey, err := x509.ParsePKCS8PrivateKey(key)
- if err != nil {
- parsedKey, err = x509.ParsePKCS1PrivateKey(key)
- if err != nil {
- return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err)
- }
- }
- parsed, ok := parsedKey.(*rsa.PrivateKey)
- if !ok {
- return nil, errors.New("private key is invalid")
- }
- return parsed, nil
-}
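
ParseKey above accepts either bare DER or a PEM container and tries PKCS#8 before falling back to PKCS#1. The same steps with only the standard library, generating a throwaway key so the sketch is self-contained:

package main

import (
    "crypto/rand"
    "crypto/rsa"
    "crypto/x509"
    "encoding/pem"
    "fmt"
)

func main() {
    // Produce a PEM-encoded PKCS#1 key to feed the parsing steps below.
    key, _ := rsa.GenerateKey(rand.Reader, 2048)
    pemBytes := pem.EncodeToMemory(&pem.Block{
        Type:  "RSA PRIVATE KEY",
        Bytes: x509.MarshalPKCS1PrivateKey(key),
    })

    // Same strategy as the removed ParseKey: unwrap PEM if present,
    // then try PKCS#8 before falling back to PKCS#1.
    der := pemBytes
    if block, _ := pem.Decode(pemBytes); block != nil {
        der = block.Bytes
    }
    parsed, err := x509.ParsePKCS8PrivateKey(der)
    if err != nil {
        rsaKey, err2 := x509.ParsePKCS1PrivateKey(der)
        if err2 != nil {
            panic(err2)
        }
        parsed = rsaKey
    }
    fmt.Printf("parsed a %T\n", parsed) // *rsa.PrivateKey
}
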
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
deleted file mode 100644
index 355c38696..000000000
--- a/vendor/golang.org/x/oauth2/internal/token.go
+++ /dev/null
@@ -1,294 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package internal
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "math"
- "mime"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/net/context/ctxhttp"
-)
-
-// Token represents the credentials used to authorize
-// the requests to access protected resources on the OAuth 2.0
-// provider's backend.
-//
-// This type is a mirror of oauth2.Token and exists to break
-// an otherwise-circular dependency. Other internal packages
-// should convert this Token into an oauth2.Token before use.
-type Token struct {
- // AccessToken is the token that authorizes and authenticates
- // the requests.
- AccessToken string
-
- // TokenType is the type of token.
- // The Type method returns either this or "Bearer", the default.
- TokenType string
-
- // RefreshToken is a token that's used by the application
- // (as opposed to the user) to refresh the access token
- // if it expires.
- RefreshToken string
-
- // Expiry is the optional expiration time of the access token.
- //
- // If zero, TokenSource implementations will reuse the same
- // token forever and RefreshToken or equivalent
- // mechanisms for that TokenSource will not be used.
- Expiry time.Time
-
- // Raw optionally contains extra metadata from the server
- // when updating a token.
- Raw interface{}
-}
-
-// tokenJSON is the struct representing the HTTP response from OAuth2
-// providers returning a token in JSON form.
-type tokenJSON struct {
- AccessToken string `json:"access_token"`
- TokenType string `json:"token_type"`
- RefreshToken string `json:"refresh_token"`
- ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
-}
-
-func (e *tokenJSON) expiry() (t time.Time) {
- if v := e.ExpiresIn; v != 0 {
- return time.Now().Add(time.Duration(v) * time.Second)
- }
- return
-}
-
-type expirationTime int32
-
-func (e *expirationTime) UnmarshalJSON(b []byte) error {
- if len(b) == 0 || string(b) == "null" {
- return nil
- }
- var n json.Number
- err := json.Unmarshal(b, &n)
- if err != nil {
- return err
- }
- i, err := n.Int64()
- if err != nil {
- return err
- }
- if i > math.MaxInt32 {
- i = math.MaxInt32
- }
- *e = expirationTime(i)
- return nil
-}
-
-// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
-//
-// Deprecated: this function no longer does anything. Caller code that
-// wants to avoid potential extra HTTP requests made during
-// auto-probing of the provider's auth style should set
-// Endpoint.AuthStyle.
-func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
-
-// AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type.
-type AuthStyle int
-
-const (
- AuthStyleUnknown AuthStyle = 0
- AuthStyleInParams AuthStyle = 1
- AuthStyleInHeader AuthStyle = 2
-)
-
-// authStyleCache is the set of tokenURLs we've successfully used via
-// RetrieveToken and which style auth we ended up using.
-// It's called a cache, but it doesn't (yet?) shrink. It's expected that
-// the set of OAuth2 servers a program contacts over time is fixed and
-// small.
-var authStyleCache struct {
- sync.Mutex
- m map[string]AuthStyle // keyed by tokenURL
-}
-
-// ResetAuthCache resets the global authentication style cache used
-// for AuthStyleUnknown token requests.
-func ResetAuthCache() {
- authStyleCache.Lock()
- defer authStyleCache.Unlock()
- authStyleCache.m = nil
-}
-
-// lookupAuthStyle reports which auth style we last used with tokenURL
-// when calling RetrieveToken and whether we have ever done so.
-func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) {
- authStyleCache.Lock()
- defer authStyleCache.Unlock()
- style, ok = authStyleCache.m[tokenURL]
- return
-}
-
-// setAuthStyle adds an entry to authStyleCache, documented above.
-func setAuthStyle(tokenURL string, v AuthStyle) {
- authStyleCache.Lock()
- defer authStyleCache.Unlock()
- if authStyleCache.m == nil {
- authStyleCache.m = make(map[string]AuthStyle)
- }
- authStyleCache.m[tokenURL] = v
-}
-
-// newTokenRequest returns a new *http.Request to retrieve a new token
-// from tokenURL using the provided clientID, clientSecret, and POST
-// body parameters.
-//
-// If authStyle is AuthStyleInParams, the clientID & clientSecret are
-// encoded into the POST body (along with any values in v); if it is
-// AuthStyleInHeader, they are sent in the Authorization header
-// instead.
-func newTokenRequest(tokenURL, clientID, clientSecret string, v url.Values, authStyle AuthStyle) (*http.Request, error) {
- if authStyle == AuthStyleInParams {
- v = cloneURLValues(v)
- if clientID != "" {
- v.Set("client_id", clientID)
- }
- if clientSecret != "" {
- v.Set("client_secret", clientSecret)
- }
- }
- req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode()))
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
- if authStyle == AuthStyleInHeader {
- req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret))
- }
- return req, nil
-}
-
-func cloneURLValues(v url.Values) url.Values {
- v2 := make(url.Values, len(v))
- for k, vv := range v {
- v2[k] = append([]string(nil), vv...)
- }
- return v2
-}
-
-func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) {
- needsAuthStyleProbe := authStyle == 0
- if needsAuthStyleProbe {
- if style, ok := lookupAuthStyle(tokenURL); ok {
- authStyle = style
- needsAuthStyleProbe = false
- } else {
- authStyle = AuthStyleInHeader // the first way we'll try
- }
- }
- req, err := newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle)
- if err != nil {
- return nil, err
- }
- token, err := doTokenRoundTrip(ctx, req)
- if err != nil && needsAuthStyleProbe {
- // If we get an error, assume the server wants the
- // clientID & clientSecret in a different form.
- // See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
- // In summary:
- // - Reddit only accepts client secret in the Authorization header
- // - Dropbox accepts either it in URL param or Auth header, but not both.
- // - Google only accepts URL param (not spec compliant?), not Auth header
- // - Stripe only accepts client secret in Auth header with Bearer method, not Basic
- //
- // We used to maintain a big table in this code of all the sites and which way
- // they went, but maintaining it didn't scale & got annoying.
- // So just try both ways.
- authStyle = AuthStyleInParams // the second way we'll try
- req, _ = newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle)
- token, err = doTokenRoundTrip(ctx, req)
- }
- if needsAuthStyleProbe && err == nil {
- setAuthStyle(tokenURL, authStyle)
- }
- // Don't overwrite `RefreshToken` with an empty value
- // if this was a token refreshing request.
- if token != nil && token.RefreshToken == "" {
- token.RefreshToken = v.Get("refresh_token")
- }
- return token, err
-}
-
-func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
- r, err := ctxhttp.Do(ctx, ContextClient(ctx), req)
- if err != nil {
- return nil, err
- }
- body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
- r.Body.Close()
- if err != nil {
- return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
- }
- if code := r.StatusCode; code < 200 || code > 299 {
- return nil, &RetrieveError{
- Response: r,
- Body: body,
- }
- }
-
- var token *Token
- content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
- switch content {
- case "application/x-www-form-urlencoded", "text/plain":
- vals, err := url.ParseQuery(string(body))
- if err != nil {
- return nil, err
- }
- token = &Token{
- AccessToken: vals.Get("access_token"),
- TokenType: vals.Get("token_type"),
- RefreshToken: vals.Get("refresh_token"),
- Raw: vals,
- }
- e := vals.Get("expires_in")
- expires, _ := strconv.Atoi(e)
- if expires != 0 {
- token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
- }
- default:
- var tj tokenJSON
- if err = json.Unmarshal(body, &tj); err != nil {
- return nil, err
- }
- token = &Token{
- AccessToken: tj.AccessToken,
- TokenType: tj.TokenType,
- RefreshToken: tj.RefreshToken,
- Expiry: tj.expiry(),
- Raw: make(map[string]interface{}),
- }
- json.Unmarshal(body, &token.Raw) // no error checks for optional fields
- }
- if token.AccessToken == "" {
- return nil, errors.New("oauth2: server response missing access_token")
- }
- return token, nil
-}
-
-type RetrieveError struct {
- Response *http.Response
- Body []byte
-}
-
-func (r *RetrieveError) Error() string {
- return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
-}
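
The deleted internal/token.go above builds the token request in one of two auth styles: the client credentials either go into the POST body or into an HTTP Basic Authorization header, and RetrieveToken probes both styles and caches whichever one worked. Below is a minimal, self-contained sketch of that request-building idea; buildTokenRequest and cloneValues are hypothetical names used only for illustration, not part of the removed vendor code.

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// cloneValues copies url.Values so the caller's map is never mutated.
func cloneValues(v url.Values) url.Values {
	v2 := make(url.Values, len(v))
	for k, vv := range v {
		v2[k] = append([]string(nil), vv...)
	}
	return v2
}

// buildTokenRequest sends the client credentials either in the POST body
// ("params" style) or via HTTP Basic auth ("header" style).
func buildTokenRequest(tokenURL, clientID, clientSecret string, v url.Values, inHeader bool) (*http.Request, error) {
	if !inHeader {
		v = cloneValues(v)
		v.Set("client_id", clientID)
		v.Set("client_secret", clientSecret)
	}
	req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode()))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	if inHeader {
		req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret))
	}
	return req, nil
}

func main() {
	v := url.Values{"grant_type": {"client_credentials"}}
	req, err := buildTokenRequest("https://provider.example.com/token", "id", "secret", v, true)
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL, req.Header.Get("Authorization") != "")
}
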
diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go
deleted file mode 100644
index 572074a63..000000000
--- a/vendor/golang.org/x/oauth2/internal/transport.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package internal
-
-import (
- "context"
- "net/http"
-)
-
-// HTTPClient is the context key to use with golang.org/x/net/context's
-// WithValue function to associate an *http.Client value with a context.
-var HTTPClient ContextKey
-
-// ContextKey is just an empty struct. It exists so HTTPClient can be
-// an immutable public variable with a unique type. It's immutable
-// because nobody else can create a ContextKey, being unexported.
-type ContextKey struct{}
-
-var appengineClientHook func(context.Context) *http.Client
-
-func ContextClient(ctx context.Context) *http.Client {
- if ctx != nil {
- if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
- return hc
- }
- }
- if appengineClientHook != nil {
- return appengineClientHook(ctx)
- }
- return http.DefaultClient
-}
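
The removed internal/transport.go stores an optional *http.Client on a context under a dedicated key type and falls back to http.DefaultClient. A small sketch of that context-key pattern follows, under assumed names (clientKey, clientFromContext) rather than the deleted identifiers.

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// clientKey plays the role of the deleted context key: a dedicated type
// guarantees no other package's context value can collide with it.
type clientKey struct{}

// clientFromContext prefers a client stored on the context and otherwise
// falls back to http.DefaultClient, mirroring the shape of ContextClient.
func clientFromContext(ctx context.Context) *http.Client {
	if hc, ok := ctx.Value(clientKey{}).(*http.Client); ok {
		return hc
	}
	return http.DefaultClient
}

func main() {
	custom := &http.Client{Timeout: 5 * time.Second}
	ctx := context.WithValue(context.Background(), clientKey{}, custom)
	fmt.Println(clientFromContext(ctx) == custom)                             // true
	fmt.Println(clientFromContext(context.Background()) == http.DefaultClient) // true
}
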
diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go
deleted file mode 100644
index 291df5c83..000000000
--- a/vendor/golang.org/x/oauth2/oauth2.go
+++ /dev/null
@@ -1,381 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package oauth2 provides support for making
-// OAuth2 authorized and authenticated HTTP requests,
-// as specified in RFC 6749.
-// It can additionally grant authorization with Bearer JWT.
-package oauth2 // import "golang.org/x/oauth2"
-
-import (
- "bytes"
- "context"
- "errors"
- "net/http"
- "net/url"
- "strings"
- "sync"
-
- "golang.org/x/oauth2/internal"
-)
-
-// NoContext is the default context you should supply if not using
-// your own context.Context (see https://golang.org/x/net/context).
-//
-// Deprecated: Use context.Background() or context.TODO() instead.
-var NoContext = context.TODO()
-
-// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
-//
-// Deprecated: this function no longer does anything. Caller code that
-// wants to avoid potential extra HTTP requests made during
-// auto-probing of the provider's auth style should set
-// Endpoint.AuthStyle.
-func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
-
-// Config describes a typical 3-legged OAuth2 flow, with both the
-// client application information and the server's endpoint URLs.
-// For the client credentials 2-legged OAuth2 flow, see the clientcredentials
-// package (https://golang.org/x/oauth2/clientcredentials).
-type Config struct {
- // ClientID is the application's ID.
- ClientID string
-
- // ClientSecret is the application's secret.
- ClientSecret string
-
- // Endpoint contains the resource server's token endpoint
- // URLs. These are constants specific to each server and are
- // often available via site-specific packages, such as
- // google.Endpoint or github.Endpoint.
- Endpoint Endpoint
-
- // RedirectURL is the URL to redirect users going through
- // the OAuth flow, after the resource owner's URLs.
- RedirectURL string
-
- // Scope specifies optional requested permissions.
- Scopes []string
-}
-
-// A TokenSource is anything that can return a token.
-type TokenSource interface {
- // Token returns a token or an error.
- // Token must be safe for concurrent use by multiple goroutines.
- // The returned Token must not be modified.
- Token() (*Token, error)
-}
-
-// Endpoint represents an OAuth 2.0 provider's authorization and token
-// endpoint URLs.
-type Endpoint struct {
- AuthURL string
- TokenURL string
-
- // AuthStyle optionally specifies how the endpoint wants the
- // client ID & client secret sent. The zero value means to
- // auto-detect.
- AuthStyle AuthStyle
-}
-
-// AuthStyle represents how requests for tokens are authenticated
-// to the server.
-type AuthStyle int
-
-const (
- // AuthStyleAutoDetect means to auto-detect which authentication
- // style the provider wants by trying both ways and caching
- // the successful way for the future.
- AuthStyleAutoDetect AuthStyle = 0
-
- // AuthStyleInParams sends the "client_id" and "client_secret"
- // in the POST body as application/x-www-form-urlencoded parameters.
- AuthStyleInParams AuthStyle = 1
-
- // AuthStyleInHeader sends the client_id and client_password
- // using HTTP Basic Authorization. This is an optional style
- // described in the OAuth2 RFC 6749 section 2.3.1.
- AuthStyleInHeader AuthStyle = 2
-)
-
-var (
- // AccessTypeOnline and AccessTypeOffline are options passed
- // to the Options.AuthCodeURL method. They modify the
- // "access_type" field that gets sent in the URL returned by
- // AuthCodeURL.
- //
- // Online is the default if neither is specified. If your
- // application needs to refresh access tokens when the user
- // is not present at the browser, then use offline. This will
- // result in your application obtaining a refresh token the
- // first time your application exchanges an authorization
- // code for a user.
- AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online")
- AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
-
- // ApprovalForce forces the users to view the consent dialog
- // and confirm the permissions request at the URL returned
- // from AuthCodeURL, even if they've already done so.
- ApprovalForce AuthCodeOption = SetAuthURLParam("prompt", "consent")
-)
-
-// An AuthCodeOption is passed to Config.AuthCodeURL.
-type AuthCodeOption interface {
- setValue(url.Values)
-}
-
-type setParam struct{ k, v string }
-
-func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
-
-// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
-// to a provider's authorization endpoint.
-func SetAuthURLParam(key, value string) AuthCodeOption {
- return setParam{key, value}
-}
-
-// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
-// that asks for permissions for the required scopes explicitly.
-//
-// State is a token to protect the user from CSRF attacks. You must
-// always provide a non-empty string and validate that it matches
-// the state query parameter on your redirect callback.
-// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
-//
-// Opts may include AccessTypeOnline or AccessTypeOffline, as well
-// as ApprovalForce.
-// It can also be used to pass the PKCE challenge.
-// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
-func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
- var buf bytes.Buffer
- buf.WriteString(c.Endpoint.AuthURL)
- v := url.Values{
- "response_type": {"code"},
- "client_id": {c.ClientID},
- }
- if c.RedirectURL != "" {
- v.Set("redirect_uri", c.RedirectURL)
- }
- if len(c.Scopes) > 0 {
- v.Set("scope", strings.Join(c.Scopes, " "))
- }
- if state != "" {
- // TODO(light): Docs say never to omit state; don't allow empty.
- v.Set("state", state)
- }
- for _, opt := range opts {
- opt.setValue(v)
- }
- if strings.Contains(c.Endpoint.AuthURL, "?") {
- buf.WriteByte('&')
- } else {
- buf.WriteByte('?')
- }
- buf.WriteString(v.Encode())
- return buf.String()
-}
-
-// PasswordCredentialsToken converts a resource owner username and password
-// pair into a token.
-//
-// Per the RFC, this grant type should only be used "when there is a high
-// degree of trust between the resource owner and the client (e.g., the client
-// is part of the device operating system or a highly privileged application),
-// and when other authorization grant types are not available."
-// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
-//
-// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
-func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
- v := url.Values{
- "grant_type": {"password"},
- "username": {username},
- "password": {password},
- }
- if len(c.Scopes) > 0 {
- v.Set("scope", strings.Join(c.Scopes, " "))
- }
- return retrieveToken(ctx, c, v)
-}
-
-// Exchange converts an authorization code into a token.
-//
-// It is used after a resource provider redirects the user back
-// to the Redirect URI (the URL obtained from AuthCodeURL).
-//
-// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
-//
-// The code will be in the *http.Request.FormValue("code"). Before
-// calling Exchange, be sure to validate FormValue("state").
-//
-// Opts may include the PKCE verifier code if previously used in AuthCodeURL.
-// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
-func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) {
- v := url.Values{
- "grant_type": {"authorization_code"},
- "code": {code},
- }
- if c.RedirectURL != "" {
- v.Set("redirect_uri", c.RedirectURL)
- }
- for _, opt := range opts {
- opt.setValue(v)
- }
- return retrieveToken(ctx, c, v)
-}
-
-// Client returns an HTTP client using the provided token.
-// The token will auto-refresh as necessary. The underlying
-// HTTP transport will be obtained using the provided context.
-// The returned client and its Transport should not be modified.
-func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
- return NewClient(ctx, c.TokenSource(ctx, t))
-}
-
-// TokenSource returns a TokenSource that returns t until t expires,
-// automatically refreshing it as necessary using the provided context.
-//
-// Most users will use Config.Client instead.
-func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
- tkr := &tokenRefresher{
- ctx: ctx,
- conf: c,
- }
- if t != nil {
- tkr.refreshToken = t.RefreshToken
- }
- return &reuseTokenSource{
- t: t,
- new: tkr,
- }
-}
-
-// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
-// HTTP requests to renew a token using a RefreshToken.
-type tokenRefresher struct {
- ctx context.Context // used to get HTTP requests
- conf *Config
- refreshToken string
-}
-
-// WARNING: Token is not safe for concurrent access, as it
-// updates the tokenRefresher's refreshToken field.
-// Within this package, it is used by reuseTokenSource which
-// synchronizes calls to this method with its own mutex.
-func (tf *tokenRefresher) Token() (*Token, error) {
- if tf.refreshToken == "" {
- return nil, errors.New("oauth2: token expired and refresh token is not set")
- }
-
- tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
- "grant_type": {"refresh_token"},
- "refresh_token": {tf.refreshToken},
- })
-
- if err != nil {
- return nil, err
- }
- if tf.refreshToken != tk.RefreshToken {
- tf.refreshToken = tk.RefreshToken
- }
- return tk, err
-}
-
-// reuseTokenSource is a TokenSource that holds a single token in memory
-// and validates its expiry before each call to retrieve it with
-// Token. If it's expired, it will be auto-refreshed using the
-// new TokenSource.
-type reuseTokenSource struct {
- new TokenSource // called when t is expired.
-
- mu sync.Mutex // guards t
- t *Token
-}
-
-// Token returns the current token if it's still valid; otherwise it
-// refreshes the token via the underlying TokenSource and returns the
-// new one.
-func (s *reuseTokenSource) Token() (*Token, error) {
- s.mu.Lock()
- defer s.mu.Unlock()
- if s.t.Valid() {
- return s.t, nil
- }
- t, err := s.new.Token()
- if err != nil {
- return nil, err
- }
- s.t = t
- return t, nil
-}
-
-// StaticTokenSource returns a TokenSource that always returns the same token.
-// Because the provided token t is never refreshed, StaticTokenSource is only
-// useful for tokens that never expire.
-func StaticTokenSource(t *Token) TokenSource {
- return staticTokenSource{t}
-}
-
-// staticTokenSource is a TokenSource that always returns the same Token.
-type staticTokenSource struct {
- t *Token
-}
-
-func (s staticTokenSource) Token() (*Token, error) {
- return s.t, nil
-}
-
-// HTTPClient is the context key to use with golang.org/x/net/context's
-// WithValue function to associate an *http.Client value with a context.
-var HTTPClient internal.ContextKey
-
-// NewClient creates an *http.Client from a Context and TokenSource.
-// The returned client is not valid beyond the lifetime of the context.
-//
-// Note that if a custom *http.Client is provided via the Context it
-// is used only for token acquisition and is not used to configure the
-// *http.Client returned from NewClient.
-//
-// As a special case, if src is nil, a non-OAuth2 client is returned
-// using the provided context. This exists to support related OAuth2
-// packages.
-func NewClient(ctx context.Context, src TokenSource) *http.Client {
- if src == nil {
- return internal.ContextClient(ctx)
- }
- return &http.Client{
- Transport: &Transport{
- Base: internal.ContextClient(ctx).Transport,
- Source: ReuseTokenSource(nil, src),
- },
- }
-}
-
-// ReuseTokenSource returns a TokenSource which repeatedly returns the
-// same token as long as it's valid, starting with t.
-// When its cached token is invalid, a new token is obtained from src.
-//
-// ReuseTokenSource is typically used to reuse tokens from a cache
-// (such as a file on disk) between runs of a program, rather than
-// obtaining new tokens unnecessarily.
-//
-// The initial token t may be nil, in which case the TokenSource is
-// wrapped in a caching version if it isn't one already. This also
-// means it's always safe to wrap ReuseTokenSource around any other
-// TokenSource without adverse effects.
-func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
- // Don't wrap a reuseTokenSource in itself. That would work,
- // but cause an unnecessary number of mutex operations.
- // Just build the equivalent one.
- if rt, ok := src.(*reuseTokenSource); ok {
- if t == nil {
- // Just use it directly.
- return rt
- }
- src = rt.new
- }
- return &reuseTokenSource{
- t: t,
- new: src,
- }
-}
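
oauth2.go, deleted above, is the public three-legged flow: build a consent URL, exchange the returned authorization code for a token, then wrap the token in an auto-refreshing client. A hedged usage sketch of that API as it existed before removal; the client ID, secret, endpoint URLs, and authorization code below are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "client-id",     // placeholder
		ClientSecret: "client-secret", // placeholder
		RedirectURL:  "https://app.example.com/callback",
		Scopes:       []string{"read"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/oauth/authorize",
			TokenURL: "https://provider.example.com/oauth/token",
		},
	}

	// Step 1: send the user to the provider's consent page.
	fmt.Println("Visit:", conf.AuthCodeURL("state-token", oauth2.AccessTypeOffline))

	// Step 2: after the redirect back, exchange the code for a token.
	ctx := context.Background()
	tok, err := conf.Exchange(ctx, "authorization-code-from-callback")
	if err != nil {
		log.Fatal(err)
	}

	// Step 3: the returned *http.Client refreshes the token as needed.
	client := conf.Client(ctx, tok)
	_ = client
}
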
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
deleted file mode 100644
index 822720341..000000000
--- a/vendor/golang.org/x/oauth2/token.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package oauth2
-
-import (
- "context"
- "fmt"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "golang.org/x/oauth2/internal"
-)
-
-// expiryDelta determines how much earlier a token should be considered
-// expired than its actual expiration time. It is used to avoid late
-// expirations due to client-server time mismatches.
-const expiryDelta = 10 * time.Second
-
-// Token represents the credentials used to authorize
-// the requests to access protected resources on the OAuth 2.0
-// provider's backend.
-//
-// Most users of this package should not access fields of Token
-// directly. They're exported mostly for use by related packages
-// implementing derivative OAuth2 flows.
-type Token struct {
- // AccessToken is the token that authorizes and authenticates
- // the requests.
- AccessToken string `json:"access_token"`
-
- // TokenType is the type of token.
- // The Type method returns either this or "Bearer", the default.
- TokenType string `json:"token_type,omitempty"`
-
- // RefreshToken is a token that's used by the application
- // (as opposed to the user) to refresh the access token
- // if it expires.
- RefreshToken string `json:"refresh_token,omitempty"`
-
- // Expiry is the optional expiration time of the access token.
- //
- // If zero, TokenSource implementations will reuse the same
- // token forever and RefreshToken or equivalent
- // mechanisms for that TokenSource will not be used.
- Expiry time.Time `json:"expiry,omitempty"`
-
- // raw optionally contains extra metadata from the server
- // when updating a token.
- raw interface{}
-}
-
-// Type returns t.TokenType if non-empty, else "Bearer".
-func (t *Token) Type() string {
- if strings.EqualFold(t.TokenType, "bearer") {
- return "Bearer"
- }
- if strings.EqualFold(t.TokenType, "mac") {
- return "MAC"
- }
- if strings.EqualFold(t.TokenType, "basic") {
- return "Basic"
- }
- if t.TokenType != "" {
- return t.TokenType
- }
- return "Bearer"
-}
-
-// SetAuthHeader sets the Authorization header to r using the access
-// token in t.
-//
-// This method is unnecessary when using Transport or an HTTP Client
-// returned by this package.
-func (t *Token) SetAuthHeader(r *http.Request) {
- r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
-}
-
-// WithExtra returns a new Token that's a clone of t, but using the
-// provided raw extra map. This is only intended for use by packages
-// implementing derivative OAuth2 flows.
-func (t *Token) WithExtra(extra interface{}) *Token {
- t2 := new(Token)
- *t2 = *t
- t2.raw = extra
- return t2
-}
-
-// Extra returns an extra field.
-// Extra fields are key-value pairs returned by the server as a
-// part of the token retrieval response.
-func (t *Token) Extra(key string) interface{} {
- if raw, ok := t.raw.(map[string]interface{}); ok {
- return raw[key]
- }
-
- vals, ok := t.raw.(url.Values)
- if !ok {
- return nil
- }
-
- v := vals.Get(key)
- switch s := strings.TrimSpace(v); strings.Count(s, ".") {
- case 0: // Contains no "."; try to parse as int
- if i, err := strconv.ParseInt(s, 10, 64); err == nil {
- return i
- }
- case 1: // Contains a single "."; try to parse as float
- if f, err := strconv.ParseFloat(s, 64); err == nil {
- return f
- }
- }
-
- return v
-}
-
-// timeNow is time.Now but pulled out as a variable for tests.
-var timeNow = time.Now
-
-// expired reports whether the token is expired.
-// t must be non-nil.
-func (t *Token) expired() bool {
- if t.Expiry.IsZero() {
- return false
- }
- return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow())
-}
-
-// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
-func (t *Token) Valid() bool {
- return t != nil && t.AccessToken != "" && !t.expired()
-}
-
-// tokenFromInternal maps an *internal.Token struct into
-// a *Token struct.
-func tokenFromInternal(t *internal.Token) *Token {
- if t == nil {
- return nil
- }
- return &Token{
- AccessToken: t.AccessToken,
- TokenType: t.TokenType,
- RefreshToken: t.RefreshToken,
- Expiry: t.Expiry,
- raw: t.Raw,
- }
-}
-
-// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
-// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
-// with an error.
-func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
- tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle))
- if err != nil {
- if rErr, ok := err.(*internal.RetrieveError); ok {
- return nil, (*RetrieveError)(rErr)
- }
- return nil, err
- }
- return tokenFromInternal(tk), nil
-}
-
-// RetrieveError is the error returned when the token endpoint returns a
-// non-2XX HTTP status code.
-type RetrieveError struct {
- Response *http.Response
- // Body is the body that was consumed by reading Response.Body.
- // It may be truncated.
- Body []byte
-}
-
-func (r *RetrieveError) Error() string {
- return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
-}
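
token.go's Valid check treats a token as expired expiryDelta early to absorb client/server clock skew, and a zero Expiry means the token never expires. A standalone sketch of that check follows; the token type and valid method here are illustrative stand-ins, not the deleted definitions.

package main

import (
	"fmt"
	"time"
)

// expiryDelta mirrors the deleted constant: consider tokens expired slightly
// before their real deadline to tolerate clock skew.
const expiryDelta = 10 * time.Second

type token struct {
	AccessToken string
	Expiry      time.Time
}

// valid reports whether the token is usable: non-nil, non-empty, and either
// without an expiry (never expires) or not yet within expiryDelta of it.
func (t *token) valid() bool {
	if t == nil || t.AccessToken == "" {
		return false
	}
	if t.Expiry.IsZero() {
		return true
	}
	return t.Expiry.Add(-expiryDelta).After(time.Now())
}

func main() {
	fresh := &token{AccessToken: "abc", Expiry: time.Now().Add(time.Hour)}
	nearlyExpired := &token{AccessToken: "abc", Expiry: time.Now().Add(5 * time.Second)}
	fmt.Println(fresh.valid(), nearlyExpired.valid()) // true false
}
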
diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go
deleted file mode 100644
index 90657915f..000000000
--- a/vendor/golang.org/x/oauth2/transport.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package oauth2
-
-import (
- "errors"
- "log"
- "net/http"
- "sync"
-)
-
-// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
-// wrapping a base RoundTripper and adding an Authorization header
-// with a token from the supplied Sources.
-//
-// Transport is a low-level mechanism. Most code will use the
-// higher-level Config.Client method instead.
-type Transport struct {
- // Source supplies the token to add to outgoing requests'
- // Authorization headers.
- Source TokenSource
-
- // Base is the base RoundTripper used to make HTTP requests.
- // If nil, http.DefaultTransport is used.
- Base http.RoundTripper
-}
-
-// RoundTrip authorizes and authenticates the request with an
-// access token from Transport's Source.
-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
- reqBodyClosed := false
- if req.Body != nil {
- defer func() {
- if !reqBodyClosed {
- req.Body.Close()
- }
- }()
- }
-
- if t.Source == nil {
- return nil, errors.New("oauth2: Transport's Source is nil")
- }
- token, err := t.Source.Token()
- if err != nil {
- return nil, err
- }
-
- req2 := cloneRequest(req) // per RoundTripper contract
- token.SetAuthHeader(req2)
-
- // req.Body is assumed to be closed by the base RoundTripper.
- reqBodyClosed = true
- return t.base().RoundTrip(req2)
-}
-
-var cancelOnce sync.Once
-
-// CancelRequest does nothing. It used to be a legacy cancellation mechanism
-// but now it only logs on first use to warn that it's deprecated.
-//
-// Deprecated: use contexts for cancellation instead.
-func (t *Transport) CancelRequest(req *http.Request) {
- cancelOnce.Do(func() {
- log.Printf("deprecated: golang.org/x/oauth2: Transport.CancelRequest no longer does anything; use contexts")
- })
-}
-
-func (t *Transport) base() http.RoundTripper {
- if t.Base != nil {
- return t.Base
- }
- return http.DefaultTransport
-}
-
-// cloneRequest returns a clone of the provided *http.Request.
-// The clone is a shallow copy of the struct and its Header map.
-func cloneRequest(r *http.Request) *http.Request {
- // shallow copy of the struct
- r2 := new(http.Request)
- *r2 = *r
- // deep copy of the Header
- r2.Header = make(http.Header, len(r.Header))
- for k, s := range r.Header {
- r2.Header[k] = append([]string(nil), s...)
- }
- return r2
-}
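
transport.go wraps a base RoundTripper, clones each request per the RoundTripper contract, and injects the Authorization header before delegating. A minimal sketch of that wrapping pattern with a static token; authTransport is a made-up name, and unlike the deleted Transport it does not pull fresh tokens from a TokenSource.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// authTransport clones the request (never mutating the caller's copy), adds
// an Authorization header, and delegates to a base transport.
type authTransport struct {
	token string
	base  http.RoundTripper
}

func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	r2 := req.Clone(req.Context()) // per the RoundTripper contract
	r2.Header.Set("Authorization", "Bearer "+t.token)
	base := t.base
	if base == nil {
		base = http.DefaultTransport
	}
	return base.RoundTrip(r2)
}

func main() {
	// Test server that echoes the Authorization header it received.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, r.Header.Get("Authorization"))
	}))
	defer srv.Close()

	client := &http.Client{Transport: &authTransport{token: "example-token"}}
	resp, err := client.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 200 OK; the body echoes "Bearer example-token"
}
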
diff --git a/vendor/golang.org/x/time/AUTHORS b/vendor/golang.org/x/time/AUTHORS
deleted file mode 100644
index 15167cd74..000000000
--- a/vendor/golang.org/x/time/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/time/CONTRIBUTORS b/vendor/golang.org/x/time/CONTRIBUTORS
deleted file mode 100644
index 1c4577e96..000000000
--- a/vendor/golang.org/x/time/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE
deleted file mode 100644
index 6a66aea5e..000000000
--- a/vendor/golang.org/x/time/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/time/PATENTS b/vendor/golang.org/x/time/PATENTS
deleted file mode 100644
index 733099041..000000000
--- a/vendor/golang.org/x/time/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
deleted file mode 100644
index 563f70429..000000000
--- a/vendor/golang.org/x/time/rate/rate.go
+++ /dev/null
@@ -1,400 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package rate provides a rate limiter.
-package rate
-
-import (
- "context"
- "fmt"
- "math"
- "sync"
- "time"
-)
-
-// Limit defines the maximum frequency of some events.
-// Limit is represented as number of events per second.
-// A zero Limit allows no events.
-type Limit float64
-
-// Inf is the infinite rate limit; it allows all events (even if burst is zero).
-const Inf = Limit(math.MaxFloat64)
-
-// Every converts a minimum time interval between events to a Limit.
-func Every(interval time.Duration) Limit {
- if interval <= 0 {
- return Inf
- }
- return 1 / Limit(interval.Seconds())
-}
-
-// A Limiter controls how frequently events are allowed to happen.
-// It implements a "token bucket" of size b, initially full and refilled
-// at rate r tokens per second.
-// Informally, in any large enough time interval, the Limiter limits the
-// rate to r tokens per second, with a maximum burst size of b events.
-// As a special case, if r == Inf (the infinite rate), b is ignored.
-// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets.
-//
-// The zero value is a valid Limiter, but it will reject all events.
-// Use NewLimiter to create non-zero Limiters.
-//
-// Limiter has three main methods, Allow, Reserve, and Wait.
-// Most callers should use Wait.
-//
-// Each of the three methods consumes a single token.
-// They differ in their behavior when no token is available.
-// If no token is available, Allow returns false.
-// If no token is available, Reserve returns a reservation for a future token
-// and the amount of time the caller must wait before using it.
-// If no token is available, Wait blocks until one can be obtained
-// or its associated context.Context is canceled.
-//
-// The methods AllowN, ReserveN, and WaitN consume n tokens.
-type Limiter struct {
- limit Limit
- burst int
-
- mu sync.Mutex
- tokens float64
- // last is the last time the limiter's tokens field was updated
- last time.Time
- // lastEvent is the latest time of a rate-limited event (past or future)
- lastEvent time.Time
-}
-
-// Limit returns the maximum overall event rate.
-func (lim *Limiter) Limit() Limit {
- lim.mu.Lock()
- defer lim.mu.Unlock()
- return lim.limit
-}
-
-// Burst returns the maximum burst size. Burst is the maximum number of tokens
-// that can be consumed in a single call to Allow, Reserve, or Wait, so higher
-// Burst values allow more events to happen at once.
-// A zero Burst allows no events, unless limit == Inf.
-func (lim *Limiter) Burst() int {
- return lim.burst
-}
-
-// NewLimiter returns a new Limiter that allows events up to rate r and permits
-// bursts of at most b tokens.
-func NewLimiter(r Limit, b int) *Limiter {
- return &Limiter{
- limit: r,
- burst: b,
- }
-}
-
-// Allow is shorthand for AllowN(time.Now(), 1).
-func (lim *Limiter) Allow() bool {
- return lim.AllowN(time.Now(), 1)
-}
-
-// AllowN reports whether n events may happen at time now.
-// Use this method if you intend to drop / skip events that exceed the rate limit.
-// Otherwise use Reserve or Wait.
-func (lim *Limiter) AllowN(now time.Time, n int) bool {
- return lim.reserveN(now, n, 0).ok
-}
-
-// A Reservation holds information about events that are permitted by a Limiter to happen after a delay.
-// A Reservation may be canceled, which may enable the Limiter to permit additional events.
-type Reservation struct {
- ok bool
- lim *Limiter
- tokens int
- timeToAct time.Time
- // This is the Limit at reservation time, it can change later.
- limit Limit
-}
-
-// OK returns whether the limiter can provide the requested number of tokens
-// within the maximum wait time. If OK is false, Delay returns InfDuration, and
-// Cancel does nothing.
-func (r *Reservation) OK() bool {
- return r.ok
-}
-
-// Delay is shorthand for DelayFrom(time.Now()).
-func (r *Reservation) Delay() time.Duration {
- return r.DelayFrom(time.Now())
-}
-
-// InfDuration is the duration returned by Delay when a Reservation is not OK.
-const InfDuration = time.Duration(1<<63 - 1)
-
-// DelayFrom returns the duration for which the reservation holder must wait
-// before taking the reserved action. Zero duration means act immediately.
-// InfDuration means the limiter cannot grant the tokens requested in this
-// Reservation within the maximum wait time.
-func (r *Reservation) DelayFrom(now time.Time) time.Duration {
- if !r.ok {
- return InfDuration
- }
- delay := r.timeToAct.Sub(now)
- if delay < 0 {
- return 0
- }
- return delay
-}
-
-// Cancel is shorthand for CancelAt(time.Now()).
-func (r *Reservation) Cancel() {
- r.CancelAt(time.Now())
- return
-}
-
-// CancelAt indicates that the reservation holder will not perform the reserved action
-// and reverses the effects of this Reservation on the rate limit as much as possible,
-// considering that other reservations may have already been made.
-func (r *Reservation) CancelAt(now time.Time) {
- if !r.ok {
- return
- }
-
- r.lim.mu.Lock()
- defer r.lim.mu.Unlock()
-
- if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) {
- return
- }
-
- // calculate tokens to restore
- // The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved
- // after r was obtained. These tokens should not be restored.
- restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct))
- if restoreTokens <= 0 {
- return
- }
- // advance time to now
- now, _, tokens := r.lim.advance(now)
- // calculate new number of tokens
- tokens += restoreTokens
- if burst := float64(r.lim.burst); tokens > burst {
- tokens = burst
- }
- // update state
- r.lim.last = now
- r.lim.tokens = tokens
- if r.timeToAct == r.lim.lastEvent {
- prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens)))
- if !prevEvent.Before(now) {
- r.lim.lastEvent = prevEvent
- }
- }
-
- return
-}
-
-// Reserve is shorthand for ReserveN(time.Now(), 1).
-func (lim *Limiter) Reserve() *Reservation {
- return lim.ReserveN(time.Now(), 1)
-}
-
-// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen.
-// The Limiter takes this Reservation into account when allowing future events.
-// The Reservation's OK method reports false if n exceeds the Limiter's burst size.
-// Usage example:
-// r := lim.ReserveN(time.Now(), 1)
-// if !r.OK() {
-// // Not allowed to act! Did you remember to set lim.burst to be > 0 ?
-// return
-// }
-// time.Sleep(r.Delay())
-// Act()
-// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events.
-// If you need to respect a deadline or cancel the delay, use Wait instead.
-// To drop or skip events exceeding rate limit, use Allow instead.
-func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation {
- r := lim.reserveN(now, n, InfDuration)
- return &r
-}
-
-// Wait is shorthand for WaitN(ctx, 1).
-func (lim *Limiter) Wait(ctx context.Context) (err error) {
- return lim.WaitN(ctx, 1)
-}
-
-// WaitN blocks until lim permits n events to happen.
-// It returns an error if n exceeds the Limiter's burst size, the Context is
-// canceled, or the expected wait time exceeds the Context's Deadline.
-// The burst limit is ignored if the rate limit is Inf.
-func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) {
- lim.mu.Lock()
- burst := lim.burst
- limit := lim.limit
- lim.mu.Unlock()
-
- if n > burst && limit != Inf {
- return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst)
- }
- // Check if ctx is already cancelled
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- // Determine wait limit
- now := time.Now()
- waitLimit := InfDuration
- if deadline, ok := ctx.Deadline(); ok {
- waitLimit = deadline.Sub(now)
- }
- // Reserve
- r := lim.reserveN(now, n, waitLimit)
- if !r.ok {
- return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n)
- }
- // Wait if necessary
- delay := r.DelayFrom(now)
- if delay == 0 {
- return nil
- }
- t := time.NewTimer(delay)
- defer t.Stop()
- select {
- case <-t.C:
- // We can proceed.
- return nil
- case <-ctx.Done():
- // Context was canceled before we could proceed. Cancel the
- // reservation, which may permit other events to proceed sooner.
- r.Cancel()
- return ctx.Err()
- }
-}
-
-// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit).
-func (lim *Limiter) SetLimit(newLimit Limit) {
- lim.SetLimitAt(time.Now(), newLimit)
-}
-
-// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated
-// or underutilized by those which reserved (using Reserve or Wait) but did not yet act
-// before SetLimitAt was called.
-func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) {
- lim.mu.Lock()
- defer lim.mu.Unlock()
-
- now, _, tokens := lim.advance(now)
-
- lim.last = now
- lim.tokens = tokens
- lim.limit = newLimit
-}
-
-// SetBurst is shorthand for SetBurstAt(time.Now(), newBurst).
-func (lim *Limiter) SetBurst(newBurst int) {
- lim.SetBurstAt(time.Now(), newBurst)
-}
-
-// SetBurstAt sets a new burst size for the limiter.
-func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) {
- lim.mu.Lock()
- defer lim.mu.Unlock()
-
- now, _, tokens := lim.advance(now)
-
- lim.last = now
- lim.tokens = tokens
- lim.burst = newBurst
-}
-
-// reserveN is a helper method for AllowN, ReserveN, and WaitN.
-// maxFutureReserve specifies the maximum reservation wait duration allowed.
-// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN.
-func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation {
- lim.mu.Lock()
-
- if lim.limit == Inf {
- lim.mu.Unlock()
- return Reservation{
- ok: true,
- lim: lim,
- tokens: n,
- timeToAct: now,
- }
- }
-
- now, last, tokens := lim.advance(now)
-
- // Calculate the remaining number of tokens resulting from the request.
- tokens -= float64(n)
-
- // Calculate the wait duration
- var waitDuration time.Duration
- if tokens < 0 {
- waitDuration = lim.limit.durationFromTokens(-tokens)
- }
-
- // Decide result
- ok := n <= lim.burst && waitDuration <= maxFutureReserve
-
- // Prepare reservation
- r := Reservation{
- ok: ok,
- lim: lim,
- limit: lim.limit,
- }
- if ok {
- r.tokens = n
- r.timeToAct = now.Add(waitDuration)
- }
-
- // Update state
- if ok {
- lim.last = now
- lim.tokens = tokens
- lim.lastEvent = r.timeToAct
- } else {
- lim.last = last
- }
-
- lim.mu.Unlock()
- return r
-}
-
-// advance calculates and returns an updated state for lim resulting from the passage of time.
-// lim is not changed.
-func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) {
- last := lim.last
- if now.Before(last) {
- last = now
- }
-
- // Avoid making delta overflow below when last is very old.
- maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens)
- elapsed := now.Sub(last)
- if elapsed > maxElapsed {
- elapsed = maxElapsed
- }
-
- // Calculate the new number of tokens, due to time that passed.
- delta := lim.limit.tokensFromDuration(elapsed)
- tokens := lim.tokens + delta
- if burst := float64(lim.burst); tokens > burst {
- tokens = burst
- }
-
- return now, last, tokens
-}
-
-// durationFromTokens is a unit conversion function from the number of tokens to the duration
-// of time it takes to accumulate them at a rate of limit tokens per second.
-func (limit Limit) durationFromTokens(tokens float64) time.Duration {
- seconds := tokens / float64(limit)
- return time.Nanosecond * time.Duration(1e9*seconds)
-}
-
-// tokensFromDuration is a unit conversion function from a time duration to the number of tokens
-// which could be accumulated during that duration at a rate of limit tokens per second.
-func (limit Limit) tokensFromDuration(d time.Duration) float64 {
- // Split the integer and fractional parts ourself to minimize rounding errors.
- // See golang.org/issues/34861.
- sec := float64(d/time.Second) * float64(limit)
- nsec := float64(d%time.Second) * float64(limit)
- return sec + nsec/1e9
-}
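
rate.go implements the token-bucket Limiter whose removal this diff records: Allow drops excess events, Reserve schedules them, and Wait blocks until a token is available or the context ends. A short usage sketch of that package API; the rate, burst, and timeout values are chosen arbitrarily for illustration.

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Allow 2 events per second with a burst of 1 token.
	lim := rate.NewLimiter(rate.Limit(2), 1)

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	for i := 0; i < 5; i++ {
		// Wait blocks until a token is available or the context expires.
		if err := lim.Wait(ctx); err != nil {
			fmt.Println("stopped:", err)
			return
		}
		fmt.Println(i, time.Now().Format("15:04:05.000"))
	}
}
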
diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE
deleted file mode 100644
index d64569567..000000000
--- a/vendor/google.golang.org/appengine/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
deleted file mode 100644
index 721053c20..000000000
--- a/vendor/google.golang.org/appengine/internal/api.go
+++ /dev/null
@@ -1,678 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-
-package internal
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "net/url"
- "os"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/golang/protobuf/proto"
- netcontext "golang.org/x/net/context"
-
- basepb "google.golang.org/appengine/internal/base"
- logpb "google.golang.org/appengine/internal/log"
- remotepb "google.golang.org/appengine/internal/remote_api"
-)
-
-const (
- apiPath = "/rpc_http"
- defaultTicketSuffix = "/default.20150612t184001.0"
-)
-
-var (
- // Incoming headers.
- ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
- dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
- traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
- curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
- userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
- remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
- devRequestIdHeader = http.CanonicalHeaderKey("X-Appengine-Dev-Request-Id")
-
- // Outgoing headers.
- apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
- apiEndpointHeaderValue = []string{"app-engine-apis"}
- apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
- apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
- apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
- apiContentType = http.CanonicalHeaderKey("Content-Type")
- apiContentTypeValue = []string{"application/octet-stream"}
- logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
-
- apiHTTPClient = &http.Client{
- Transport: &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: limitDial,
- MaxIdleConns: 1000,
- MaxIdleConnsPerHost: 10000,
- IdleConnTimeout: 90 * time.Second,
- },
- }
-
- defaultTicketOnce sync.Once
- defaultTicket string
- backgroundContextOnce sync.Once
- backgroundContext netcontext.Context
-)
-
-func apiURL() *url.URL {
- host, port := "appengine.googleapis.internal", "10001"
- if h := os.Getenv("API_HOST"); h != "" {
- host = h
- }
- if p := os.Getenv("API_PORT"); p != "" {
- port = p
- }
- return &url.URL{
- Scheme: "http",
- Host: host + ":" + port,
- Path: apiPath,
- }
-}
-
-func handleHTTP(w http.ResponseWriter, r *http.Request) {
- c := &context{
- req: r,
- outHeader: w.Header(),
- apiURL: apiURL(),
- }
- r = r.WithContext(withContext(r.Context(), c))
- c.req = r
-
- stopFlushing := make(chan int)
-
- // Patch up RemoteAddr so it looks reasonable.
- if addr := r.Header.Get(userIPHeader); addr != "" {
- r.RemoteAddr = addr
- } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
- r.RemoteAddr = addr
- } else {
- // Should not normally reach here, but pick a sensible default anyway.
- r.RemoteAddr = "127.0.0.1"
- }
- // The address in the headers will most likely be of these forms:
- // 123.123.123.123
- // 2001:db8::1
- // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
- if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
- // Assume the remote address is only a host; add a default port.
- r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
- }
-
- // Start goroutine responsible for flushing app logs.
- // This is done after adding c to ctx.m (and stopped before removing it)
- // because flushing logs requires making an API call.
- go c.logFlusher(stopFlushing)
-
- executeRequestSafely(c, r)
- c.outHeader = nil // make sure header changes aren't respected any more
-
- stopFlushing <- 1 // any logging beyond this point will be dropped
-
- // Flush any pending logs asynchronously.
- c.pendingLogs.Lock()
- flushes := c.pendingLogs.flushes
- if len(c.pendingLogs.lines) > 0 {
- flushes++
- }
- c.pendingLogs.Unlock()
- flushed := make(chan struct{})
- go func() {
- defer close(flushed)
- // Force a log flush, because with very short requests we
- // may not ever flush logs.
- c.flushLog(true)
- }()
- w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
-
- // Avoid nil Write call if c.Write is never called.
- if c.outCode != 0 {
- w.WriteHeader(c.outCode)
- }
- if c.outBody != nil {
- w.Write(c.outBody)
- }
- // Wait for the last flush to complete before returning,
- // otherwise the security ticket will not be valid.
- <-flushed
-}
-
-func executeRequestSafely(c *context, r *http.Request) {
- defer func() {
- if x := recover(); x != nil {
- logf(c, 4, "%s", renderPanic(x)) // 4 == critical
- c.outCode = 500
- }
- }()
-
- http.DefaultServeMux.ServeHTTP(c, r)
-}
-
-func renderPanic(x interface{}) string {
- buf := make([]byte, 16<<10) // 16 KB should be plenty
- buf = buf[:runtime.Stack(buf, false)]
-
- // Remove the first few stack frames:
- // this func
- // the recover closure in the caller
- // That will root the stack trace at the site of the panic.
- const (
- skipStart = "internal.renderPanic"
- skipFrames = 2
- )
- start := bytes.Index(buf, []byte(skipStart))
- p := start
- for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
- p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
- if p < 0 {
- break
- }
- }
- if p >= 0 {
- // buf[start:p+1] is the block to remove.
- // Copy buf[p+1:] over buf[start:] and shrink buf.
- copy(buf[start:], buf[p+1:])
- buf = buf[:len(buf)-(p+1-start)]
- }
-
- // Add panic heading.
- head := fmt.Sprintf("panic: %v\n\n", x)
- if len(head) > len(buf) {
- // Extremely unlikely to happen.
- return head
- }
- copy(buf[len(head):], buf)
- copy(buf, head)
-
- return string(buf)
-}
-
-// context represents the context of an in-flight HTTP request.
-// It implements the appengine.Context and http.ResponseWriter interfaces.
-type context struct {
- req *http.Request
-
- outCode int
- outHeader http.Header
- outBody []byte
-
- pendingLogs struct {
- sync.Mutex
- lines []*logpb.UserAppLogLine
- flushes int
- }
-
- apiURL *url.URL
-}
-
-var contextKey = "holds a *context"
-
-// jointContext joins two contexts in a superficial way.
-// It takes values and timeouts from a base context, and only values from another context.
-type jointContext struct {
- base netcontext.Context
- valuesOnly netcontext.Context
-}
-
-func (c jointContext) Deadline() (time.Time, bool) {
- return c.base.Deadline()
-}
-
-func (c jointContext) Done() <-chan struct{} {
- return c.base.Done()
-}
-
-func (c jointContext) Err() error {
- return c.base.Err()
-}
-
-func (c jointContext) Value(key interface{}) interface{} {
- if val := c.base.Value(key); val != nil {
- return val
- }
- return c.valuesOnly.Value(key)
-}
-
-// fromContext returns the App Engine context or nil if ctx is not
-// derived from an App Engine context.
-func fromContext(ctx netcontext.Context) *context {
- c, _ := ctx.Value(&contextKey).(*context)
- return c
-}
-
-func withContext(parent netcontext.Context, c *context) netcontext.Context {
- ctx := netcontext.WithValue(parent, &contextKey, c)
- if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
- ctx = withNamespace(ctx, ns)
- }
- return ctx
-}
-
-func toContext(c *context) netcontext.Context {
- return withContext(netcontext.Background(), c)
-}
-
-func IncomingHeaders(ctx netcontext.Context) http.Header {
- if c := fromContext(ctx); c != nil {
- return c.req.Header
- }
- return nil
-}
-
-func ReqContext(req *http.Request) netcontext.Context {
- return req.Context()
-}
-
-func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
- return jointContext{
- base: parent,
- valuesOnly: req.Context(),
- }
-}
-
-// DefaultTicket returns a ticket used for background context or dev_appserver.
-func DefaultTicket() string {
- defaultTicketOnce.Do(func() {
- if IsDevAppServer() {
- defaultTicket = "testapp" + defaultTicketSuffix
- return
- }
- appID := partitionlessAppID()
- escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
- majVersion := VersionID(nil)
- if i := strings.Index(majVersion, "."); i > 0 {
- majVersion = majVersion[:i]
- }
- defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
- })
- return defaultTicket
-}
-
-func BackgroundContext() netcontext.Context {
- backgroundContextOnce.Do(func() {
- // Compute background security ticket.
- ticket := DefaultTicket()
-
- c := &context{
- req: &http.Request{
- Header: http.Header{
- ticketHeader: []string{ticket},
- },
- },
- apiURL: apiURL(),
- }
- backgroundContext = toContext(c)
-
- // TODO(dsymonds): Wire up the shutdown handler to do a final flush.
- go c.logFlusher(make(chan int))
- })
-
- return backgroundContext
-}
-
-// RegisterTestRequest registers the HTTP request req for testing, such that
-// any API calls are sent to the provided URL. It returns a closure to delete
-// the registration.
-// It should only be used by aetest package.
-func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) {
- c := &context{
- req: req,
- apiURL: apiURL,
- }
- ctx := withContext(decorate(req.Context()), c)
- req = req.WithContext(ctx)
- c.req = req
- return req, func() {}
-}
-
-var errTimeout = &CallError{
- Detail: "Deadline exceeded",
- Code: int32(remotepb.RpcError_CANCELLED),
- Timeout: true,
-}
-
-func (c *context) Header() http.Header { return c.outHeader }
-
-// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
-// codes do not permit a response body (nor response entity headers such as
-// Content-Length, Content-Type, etc).
-func bodyAllowedForStatus(status int) bool {
- switch {
- case status >= 100 && status <= 199:
- return false
- case status == 204:
- return false
- case status == 304:
- return false
- }
- return true
-}
-
-func (c *context) Write(b []byte) (int, error) {
- if c.outCode == 0 {
- c.WriteHeader(http.StatusOK)
- }
- if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
- return 0, http.ErrBodyNotAllowed
- }
- c.outBody = append(c.outBody, b...)
- return len(b), nil
-}
-
-func (c *context) WriteHeader(code int) {
- if c.outCode != 0 {
- logf(c, 3, "WriteHeader called multiple times on request.") // error level
- return
- }
- c.outCode = code
-}
-
-func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
- hreq := &http.Request{
- Method: "POST",
- URL: c.apiURL,
- Header: http.Header{
- apiEndpointHeader: apiEndpointHeaderValue,
- apiMethodHeader: apiMethodHeaderValue,
- apiContentType: apiContentTypeValue,
- apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
- },
- Body: ioutil.NopCloser(bytes.NewReader(body)),
- ContentLength: int64(len(body)),
- Host: c.apiURL.Host,
- }
- if info := c.req.Header.Get(dapperHeader); info != "" {
- hreq.Header.Set(dapperHeader, info)
- }
- if info := c.req.Header.Get(traceHeader); info != "" {
- hreq.Header.Set(traceHeader, info)
- }
-
- tr := apiHTTPClient.Transport.(*http.Transport)
-
- var timedOut int32 // atomic; set to 1 if timed out
- t := time.AfterFunc(timeout, func() {
- atomic.StoreInt32(&timedOut, 1)
- tr.CancelRequest(hreq)
- })
- defer t.Stop()
- defer func() {
- // Check if timeout was exceeded.
- if atomic.LoadInt32(&timedOut) != 0 {
- err = errTimeout
- }
- }()
-
- hresp, err := apiHTTPClient.Do(hreq)
- if err != nil {
- return nil, &CallError{
- Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
- Code: int32(remotepb.RpcError_UNKNOWN),
- }
- }
- defer hresp.Body.Close()
- hrespBody, err := ioutil.ReadAll(hresp.Body)
- if hresp.StatusCode != 200 {
- return nil, &CallError{
- Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
- Code: int32(remotepb.RpcError_UNKNOWN),
- }
- }
- if err != nil {
- return nil, &CallError{
- Detail: fmt.Sprintf("service bridge response bad: %v", err),
- Code: int32(remotepb.RpcError_UNKNOWN),
- }
- }
- return hrespBody, nil
-}
-
-func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
- if ns := NamespaceFromContext(ctx); ns != "" {
- if fn, ok := NamespaceMods[service]; ok {
- fn(in, ns)
- }
- }
-
- if f, ctx, ok := callOverrideFromContext(ctx); ok {
- return f(ctx, service, method, in, out)
- }
-
- // Handle already-done contexts quickly.
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- c := fromContext(ctx)
- if c == nil {
- // Give a good error message rather than a panic lower down.
- return errNotAppEngineContext
- }
-
- // Apply transaction modifications if we're in a transaction.
- if t := transactionFromContext(ctx); t != nil {
- if t.finished {
- return errors.New("transaction context has expired")
- }
- applyTransaction(in, &t.transaction)
- }
-
- // Default RPC timeout is 60s.
- timeout := 60 * time.Second
- if deadline, ok := ctx.Deadline(); ok {
- timeout = deadline.Sub(time.Now())
- }
-
- data, err := proto.Marshal(in)
- if err != nil {
- return err
- }
-
- ticket := c.req.Header.Get(ticketHeader)
- // Use a test ticket under test environment.
- if ticket == "" {
- if appid := ctx.Value(&appIDOverrideKey); appid != nil {
- ticket = appid.(string) + defaultTicketSuffix
- }
- }
- // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver.
- if ticket == "" {
- ticket = DefaultTicket()
- }
- if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" {
- ticket = dri
- }
- req := &remotepb.Request{
- ServiceName: &service,
- Method: &method,
- Request: data,
- RequestId: &ticket,
- }
- hreqBody, err := proto.Marshal(req)
- if err != nil {
- return err
- }
-
- hrespBody, err := c.post(hreqBody, timeout)
- if err != nil {
- return err
- }
-
- res := &remotepb.Response{}
- if err := proto.Unmarshal(hrespBody, res); err != nil {
- return err
- }
- if res.RpcError != nil {
- ce := &CallError{
- Detail: res.RpcError.GetDetail(),
- Code: *res.RpcError.Code,
- }
- switch remotepb.RpcError_ErrorCode(ce.Code) {
- case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
- ce.Timeout = true
- }
- return ce
- }
- if res.ApplicationError != nil {
- return &APIError{
- Service: *req.ServiceName,
- Detail: res.ApplicationError.GetDetail(),
- Code: *res.ApplicationError.Code,
- }
- }
- if res.Exception != nil || res.JavaException != nil {
- // This shouldn't happen, but let's be defensive.
- return &CallError{
- Detail: "service bridge returned exception",
- Code: int32(remotepb.RpcError_UNKNOWN),
- }
- }
- return proto.Unmarshal(res.Response, out)
-}
-
-func (c *context) Request() *http.Request {
- return c.req
-}
-
-func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
- // Truncate long log lines.
- // TODO(dsymonds): Check if this is still necessary.
- const lim = 8 << 10
- if len(*ll.Message) > lim {
- suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
- ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
- }
-
- c.pendingLogs.Lock()
- c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
- c.pendingLogs.Unlock()
-}
-
-var logLevelName = map[int64]string{
- 0: "DEBUG",
- 1: "INFO",
- 2: "WARNING",
- 3: "ERROR",
- 4: "CRITICAL",
-}
-
-func logf(c *context, level int64, format string, args ...interface{}) {
- if c == nil {
- panic("not an App Engine context")
- }
- s := fmt.Sprintf(format, args...)
- s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
- c.addLogLine(&logpb.UserAppLogLine{
- TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
- Level: &level,
- Message: &s,
- })
- // Only duplicate log to stderr if not running on App Engine second generation
- if !IsSecondGen() {
- log.Print(logLevelName[level] + ": " + s)
- }
-}
-
-// flushLog attempts to flush any pending logs to the appserver.
-// It should not be called concurrently.
-func (c *context) flushLog(force bool) (flushed bool) {
- c.pendingLogs.Lock()
- // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
- n, rem := 0, 30<<20
- for ; n < len(c.pendingLogs.lines); n++ {
- ll := c.pendingLogs.lines[n]
- // Each log line will require about 3 bytes of overhead.
- nb := proto.Size(ll) + 3
- if nb > rem {
- break
- }
- rem -= nb
- }
- lines := c.pendingLogs.lines[:n]
- c.pendingLogs.lines = c.pendingLogs.lines[n:]
- c.pendingLogs.Unlock()
-
- if len(lines) == 0 && !force {
- // Nothing to flush.
- return false
- }
-
- rescueLogs := false
- defer func() {
- if rescueLogs {
- c.pendingLogs.Lock()
- c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
- c.pendingLogs.Unlock()
- }
- }()
-
- buf, err := proto.Marshal(&logpb.UserAppLogGroup{
- LogLine: lines,
- })
- if err != nil {
- log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
- rescueLogs = true
- return false
- }
-
- req := &logpb.FlushRequest{
- Logs: buf,
- }
- res := &basepb.VoidProto{}
- c.pendingLogs.Lock()
- c.pendingLogs.flushes++
- c.pendingLogs.Unlock()
- if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
- log.Printf("internal.flushLog: Flush RPC: %v", err)
- rescueLogs = true
- return false
- }
- return true
-}
-
-const (
- // Log flushing parameters.
- flushInterval = 1 * time.Second
- forceFlushInterval = 60 * time.Second
-)
-
-func (c *context) logFlusher(stop <-chan int) {
- lastFlush := time.Now()
- tick := time.NewTicker(flushInterval)
- for {
- select {
- case <-stop:
- // Request finished.
- tick.Stop()
- return
- case <-tick.C:
- force := time.Now().Sub(lastFlush) > forceFlushInterval
- if c.flushLog(force) {
- lastFlush = time.Now()
- }
- }
- }
-}
-
-func ContextForTesting(req *http.Request) netcontext.Context {
- return toContext(&context{req: req})
-}
diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go
deleted file mode 100644
index f0f40b2e3..000000000
--- a/vendor/google.golang.org/appengine/internal/api_classic.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package internal
-
-import (
- "errors"
- "fmt"
- "net/http"
- "time"
-
- "appengine"
- "appengine_internal"
- basepb "appengine_internal/base"
-
- "github.com/golang/protobuf/proto"
- netcontext "golang.org/x/net/context"
-)
-
-var contextKey = "holds an appengine.Context"
-
-// fromContext returns the App Engine context or nil if ctx is not
-// derived from an App Engine context.
-func fromContext(ctx netcontext.Context) appengine.Context {
- c, _ := ctx.Value(&contextKey).(appengine.Context)
- return c
-}
-
-// This is only for classic App Engine adapters.
-func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) {
- c := fromContext(ctx)
- if c == nil {
- return nil, errNotAppEngineContext
- }
- return c, nil
-}
-
-func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
- ctx := netcontext.WithValue(parent, &contextKey, c)
-
- s := &basepb.StringProto{}
- c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
- if ns := s.GetValue(); ns != "" {
- ctx = NamespacedContext(ctx, ns)
- }
-
- return ctx
-}
-
-func IncomingHeaders(ctx netcontext.Context) http.Header {
- if c := fromContext(ctx); c != nil {
- if req, ok := c.Request().(*http.Request); ok {
- return req.Header
- }
- }
- return nil
-}
-
-func ReqContext(req *http.Request) netcontext.Context {
- return WithContext(netcontext.Background(), req)
-}
-
-func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
- c := appengine.NewContext(req)
- return withContext(parent, c)
-}
-
-type testingContext struct {
- appengine.Context
-
- req *http.Request
-}
-
-func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" }
-func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error {
- if service == "__go__" && method == "GetNamespace" {
- return nil
- }
- return fmt.Errorf("testingContext: unsupported Call")
-}
-func (t *testingContext) Request() interface{} { return t.req }
-
-func ContextForTesting(req *http.Request) netcontext.Context {
- return withContext(netcontext.Background(), &testingContext{req: req})
-}
-
-func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
- if ns := NamespaceFromContext(ctx); ns != "" {
- if fn, ok := NamespaceMods[service]; ok {
- fn(in, ns)
- }
- }
-
- if f, ctx, ok := callOverrideFromContext(ctx); ok {
- return f(ctx, service, method, in, out)
- }
-
- // Handle already-done contexts quickly.
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- c := fromContext(ctx)
- if c == nil {
- // Give a good error message rather than a panic lower down.
- return errNotAppEngineContext
- }
-
- // Apply transaction modifications if we're in a transaction.
- if t := transactionFromContext(ctx); t != nil {
- if t.finished {
- return errors.New("transaction context has expired")
- }
- applyTransaction(in, &t.transaction)
- }
-
- var opts *appengine_internal.CallOptions
- if d, ok := ctx.Deadline(); ok {
- opts = &appengine_internal.CallOptions{
- Timeout: d.Sub(time.Now()),
- }
- }
-
- err := c.Call(service, method, in, out, opts)
- switch v := err.(type) {
- case *appengine_internal.APIError:
- return &APIError{
- Service: v.Service,
- Detail: v.Detail,
- Code: v.Code,
- }
- case *appengine_internal.CallError:
- return &CallError{
- Detail: v.Detail,
- Code: v.Code,
- Timeout: v.Timeout,
- }
- }
- return err
-}
-
-func handleHTTP(w http.ResponseWriter, r *http.Request) {
- panic("handleHTTP called; this should be impossible")
-}
-
-func logf(c appengine.Context, level int64, format string, args ...interface{}) {
- var fn func(format string, args ...interface{})
- switch level {
- case 0:
- fn = c.Debugf
- case 1:
- fn = c.Infof
- case 2:
- fn = c.Warningf
- case 3:
- fn = c.Errorf
- case 4:
- fn = c.Criticalf
- default:
- // This shouldn't happen.
- fn = c.Criticalf
- }
- fn(format, args...)
-}
diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go
deleted file mode 100644
index e0c0b214b..000000000
--- a/vendor/google.golang.org/appengine/internal/api_common.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-import (
- "errors"
- "os"
-
- "github.com/golang/protobuf/proto"
- netcontext "golang.org/x/net/context"
-)
-
-var errNotAppEngineContext = errors.New("not an App Engine context")
-
-type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error
-
-var callOverrideKey = "holds []CallOverrideFunc"
-
-func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
- // We avoid appending to any existing call override
- // so we don't risk overwriting a popped stack below.
- var cofs []CallOverrideFunc
- if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
- cofs = append(cofs, uf...)
- }
- cofs = append(cofs, f)
- return netcontext.WithValue(ctx, &callOverrideKey, cofs)
-}
-
-func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
- cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
- if len(cofs) == 0 {
- return nil, nil, false
- }
- // We found a list of overrides; grab the last, and reconstitute a
- // context that will hide it.
- f := cofs[len(cofs)-1]
- ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
- return f, ctx, true
-}
-
-type logOverrideFunc func(level int64, format string, args ...interface{})
-
-var logOverrideKey = "holds a logOverrideFunc"
-
-func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
- return netcontext.WithValue(ctx, &logOverrideKey, f)
-}
-
-var appIDOverrideKey = "holds a string, being the full app ID"
-
-func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
- return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
-}
-
-var namespaceKey = "holds the namespace string"
-
-func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
- return netcontext.WithValue(ctx, &namespaceKey, ns)
-}
-
-func NamespaceFromContext(ctx netcontext.Context) string {
- // If there's no namespace, return the empty string.
- ns, _ := ctx.Value(&namespaceKey).(string)
- return ns
-}
-
-// FullyQualifiedAppID returns the fully-qualified application ID.
-// This may contain a partition prefix (e.g. "s~" for High Replication apps),
-// or a domain prefix (e.g. "example.com:").
-func FullyQualifiedAppID(ctx netcontext.Context) string {
- if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
- return id
- }
- return fullyQualifiedAppID(ctx)
-}
-
-func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
- if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
- f(level, format, args...)
- return
- }
- c := fromContext(ctx)
- if c == nil {
- panic(errNotAppEngineContext)
- }
- logf(c, level, format, args...)
-}
-
-// NamespacedContext wraps a Context to support namespaces.
-func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
- return withNamespace(ctx, namespace)
-}
-
-// SetTestEnv sets the env variables for testing background ticket in Flex.
-func SetTestEnv() func() {
- var environ = []struct {
- key, value string
- }{
- {"GAE_LONG_APP_ID", "my-app-id"},
- {"GAE_MINOR_VERSION", "067924799508853122"},
- {"GAE_MODULE_INSTANCE", "0"},
- {"GAE_MODULE_NAME", "default"},
- {"GAE_MODULE_VERSION", "20150612t184001"},
- }
-
- for _, v := range environ {
- old := os.Getenv(v.key)
- os.Setenv(v.key, v.value)
- v.value = old
- }
- return func() { // Restore old environment after the test completes.
- for _, v := range environ {
- if v.value == "" {
- os.Unsetenv(v.key)
- continue
- }
- os.Setenv(v.key, v.value)
- }
- }
-}
diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go
deleted file mode 100644
index 11df8c07b..000000000
--- a/vendor/google.golang.org/appengine/internal/app_id.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-import (
- "strings"
-)
-
-func parseFullAppID(appid string) (partition, domain, displayID string) {
- if i := strings.Index(appid, "~"); i != -1 {
- partition, appid = appid[:i], appid[i+1:]
- }
- if i := strings.Index(appid, ":"); i != -1 {
- domain, appid = appid[:i], appid[i+1:]
- }
- return partition, domain, appid
-}
-
-// appID returns "appid" or "domain.com:appid".
-func appID(fullAppID string) string {
- _, dom, dis := parseFullAppID(fullAppID)
- if dom != "" {
- return dom + ":" + dis
- }
- return dis
-}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
deleted file mode 100644
index db4777e68..000000000
--- a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
+++ /dev/null
@@ -1,308 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/base/api_base.proto
-
-package base
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type StringProto struct {
- Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StringProto) Reset() { *m = StringProto{} }
-func (m *StringProto) String() string { return proto.CompactTextString(m) }
-func (*StringProto) ProtoMessage() {}
-func (*StringProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{0}
-}
-func (m *StringProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StringProto.Unmarshal(m, b)
-}
-func (m *StringProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StringProto.Marshal(b, m, deterministic)
-}
-func (dst *StringProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StringProto.Merge(dst, src)
-}
-func (m *StringProto) XXX_Size() int {
- return xxx_messageInfo_StringProto.Size(m)
-}
-func (m *StringProto) XXX_DiscardUnknown() {
- xxx_messageInfo_StringProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StringProto proto.InternalMessageInfo
-
-func (m *StringProto) GetValue() string {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return ""
-}
-
-type Integer32Proto struct {
- Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Integer32Proto) Reset() { *m = Integer32Proto{} }
-func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
-func (*Integer32Proto) ProtoMessage() {}
-func (*Integer32Proto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{1}
-}
-func (m *Integer32Proto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Integer32Proto.Unmarshal(m, b)
-}
-func (m *Integer32Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Integer32Proto.Marshal(b, m, deterministic)
-}
-func (dst *Integer32Proto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Integer32Proto.Merge(dst, src)
-}
-func (m *Integer32Proto) XXX_Size() int {
- return xxx_messageInfo_Integer32Proto.Size(m)
-}
-func (m *Integer32Proto) XXX_DiscardUnknown() {
- xxx_messageInfo_Integer32Proto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Integer32Proto proto.InternalMessageInfo
-
-func (m *Integer32Proto) GetValue() int32 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-type Integer64Proto struct {
- Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Integer64Proto) Reset() { *m = Integer64Proto{} }
-func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
-func (*Integer64Proto) ProtoMessage() {}
-func (*Integer64Proto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{2}
-}
-func (m *Integer64Proto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Integer64Proto.Unmarshal(m, b)
-}
-func (m *Integer64Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Integer64Proto.Marshal(b, m, deterministic)
-}
-func (dst *Integer64Proto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Integer64Proto.Merge(dst, src)
-}
-func (m *Integer64Proto) XXX_Size() int {
- return xxx_messageInfo_Integer64Proto.Size(m)
-}
-func (m *Integer64Proto) XXX_DiscardUnknown() {
- xxx_messageInfo_Integer64Proto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Integer64Proto proto.InternalMessageInfo
-
-func (m *Integer64Proto) GetValue() int64 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-type BoolProto struct {
- Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *BoolProto) Reset() { *m = BoolProto{} }
-func (m *BoolProto) String() string { return proto.CompactTextString(m) }
-func (*BoolProto) ProtoMessage() {}
-func (*BoolProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{3}
-}
-func (m *BoolProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_BoolProto.Unmarshal(m, b)
-}
-func (m *BoolProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_BoolProto.Marshal(b, m, deterministic)
-}
-func (dst *BoolProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BoolProto.Merge(dst, src)
-}
-func (m *BoolProto) XXX_Size() int {
- return xxx_messageInfo_BoolProto.Size(m)
-}
-func (m *BoolProto) XXX_DiscardUnknown() {
- xxx_messageInfo_BoolProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BoolProto proto.InternalMessageInfo
-
-func (m *BoolProto) GetValue() bool {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return false
-}
-
-type DoubleProto struct {
- Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DoubleProto) Reset() { *m = DoubleProto{} }
-func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
-func (*DoubleProto) ProtoMessage() {}
-func (*DoubleProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{4}
-}
-func (m *DoubleProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DoubleProto.Unmarshal(m, b)
-}
-func (m *DoubleProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DoubleProto.Marshal(b, m, deterministic)
-}
-func (dst *DoubleProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DoubleProto.Merge(dst, src)
-}
-func (m *DoubleProto) XXX_Size() int {
- return xxx_messageInfo_DoubleProto.Size(m)
-}
-func (m *DoubleProto) XXX_DiscardUnknown() {
- xxx_messageInfo_DoubleProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DoubleProto proto.InternalMessageInfo
-
-func (m *DoubleProto) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-type BytesProto struct {
- Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *BytesProto) Reset() { *m = BytesProto{} }
-func (m *BytesProto) String() string { return proto.CompactTextString(m) }
-func (*BytesProto) ProtoMessage() {}
-func (*BytesProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{5}
-}
-func (m *BytesProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_BytesProto.Unmarshal(m, b)
-}
-func (m *BytesProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_BytesProto.Marshal(b, m, deterministic)
-}
-func (dst *BytesProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BytesProto.Merge(dst, src)
-}
-func (m *BytesProto) XXX_Size() int {
- return xxx_messageInfo_BytesProto.Size(m)
-}
-func (m *BytesProto) XXX_DiscardUnknown() {
- xxx_messageInfo_BytesProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BytesProto proto.InternalMessageInfo
-
-func (m *BytesProto) GetValue() []byte {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-type VoidProto struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *VoidProto) Reset() { *m = VoidProto{} }
-func (m *VoidProto) String() string { return proto.CompactTextString(m) }
-func (*VoidProto) ProtoMessage() {}
-func (*VoidProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{6}
-}
-func (m *VoidProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_VoidProto.Unmarshal(m, b)
-}
-func (m *VoidProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_VoidProto.Marshal(b, m, deterministic)
-}
-func (dst *VoidProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VoidProto.Merge(dst, src)
-}
-func (m *VoidProto) XXX_Size() int {
- return xxx_messageInfo_VoidProto.Size(m)
-}
-func (m *VoidProto) XXX_DiscardUnknown() {
- xxx_messageInfo_VoidProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VoidProto proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*StringProto)(nil), "appengine.base.StringProto")
- proto.RegisterType((*Integer32Proto)(nil), "appengine.base.Integer32Proto")
- proto.RegisterType((*Integer64Proto)(nil), "appengine.base.Integer64Proto")
- proto.RegisterType((*BoolProto)(nil), "appengine.base.BoolProto")
- proto.RegisterType((*DoubleProto)(nil), "appengine.base.DoubleProto")
- proto.RegisterType((*BytesProto)(nil), "appengine.base.BytesProto")
- proto.RegisterType((*VoidProto)(nil), "appengine.base.VoidProto")
-}
-
-func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/base/api_base.proto", fileDescriptor_api_base_9d49f8792e0c1140)
-}
-
-var fileDescriptor_api_base_9d49f8792e0c1140 = []byte{
- // 199 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xcf, 0x3f, 0x4b, 0xc6, 0x30,
- 0x10, 0x06, 0x70, 0x5a, 0xad, 0xb4, 0x57, 0xe9, 0x20, 0x0e, 0x1d, 0xb5, 0x05, 0x71, 0x4a, 0x40,
- 0x45, 0x9c, 0x83, 0x8b, 0x9b, 0x28, 0x38, 0xb8, 0x48, 0x8a, 0xc7, 0x11, 0x08, 0xb9, 0x90, 0xa6,
- 0x82, 0xdf, 0x5e, 0xda, 0xd2, 0xfa, 0xc2, 0x9b, 0xed, 0xfe, 0xfc, 0xe0, 0xe1, 0x81, 0x27, 0x62,
- 0x26, 0x8b, 0x82, 0xd8, 0x6a, 0x47, 0x82, 0x03, 0x49, 0xed, 0x3d, 0x3a, 0x32, 0x0e, 0xa5, 0x71,
- 0x11, 0x83, 0xd3, 0x56, 0x0e, 0x7a, 0x44, 0xa9, 0xbd, 0xf9, 0x9a, 0x07, 0xe1, 0x03, 0x47, 0xbe,
- 0x68, 0x76, 0x27, 0xe6, 0x6b, 0xd7, 0x43, 0xfd, 0x1e, 0x83, 0x71, 0xf4, 0xba, 0xbc, 0x2f, 0xa1,
- 0xf8, 0xd1, 0x76, 0xc2, 0x36, 0xbb, 0xca, 0x6f, 0xab, 0xb7, 0x75, 0xe9, 0x6e, 0xa0, 0x79, 0x71,
- 0x11, 0x09, 0xc3, 0xfd, 0x5d, 0xc2, 0x15, 0xc7, 0xee, 0xf1, 0x21, 0xe1, 0x4e, 0x36, 0x77, 0x0d,
- 0x95, 0x62, 0xb6, 0x09, 0x52, 0x6e, 0xa4, 0x87, 0xfa, 0x99, 0xa7, 0xc1, 0x62, 0x02, 0x65, 0xff,
- 0x79, 0xa0, 0x7e, 0x23, 0x8e, 0xab, 0x69, 0x0f, 0xcd, 0xb9, 0xca, 0xcb, 0xdd, 0xd5, 0x50, 0x7d,
- 0xb0, 0xf9, 0x5e, 0x98, 0x3a, 0xfb, 0x3c, 0x9d, 0x9b, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xba,
- 0x37, 0x25, 0xea, 0x44, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto
deleted file mode 100644
index 56cd7a3ca..000000000
--- a/vendor/google.golang.org/appengine/internal/base/api_base.proto
+++ /dev/null
@@ -1,33 +0,0 @@
-// Built-in base types for API calls. Primarily useful as return types.
-
-syntax = "proto2";
-option go_package = "base";
-
-package appengine.base;
-
-message StringProto {
- required string value = 1;
-}
-
-message Integer32Proto {
- required int32 value = 1;
-}
-
-message Integer64Proto {
- required int64 value = 1;
-}
-
-message BoolProto {
- required bool value = 1;
-}
-
-message DoubleProto {
- required double value = 1;
-}
-
-message BytesProto {
- required bytes value = 1 [ctype=CORD];
-}
-
-message VoidProto {
-}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
deleted file mode 100644
index 2fb748289..000000000
--- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
+++ /dev/null
@@ -1,4367 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto
-
-package datastore
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type Property_Meaning int32
-
-const (
- Property_NO_MEANING Property_Meaning = 0
- Property_BLOB Property_Meaning = 14
- Property_TEXT Property_Meaning = 15
- Property_BYTESTRING Property_Meaning = 16
- Property_ATOM_CATEGORY Property_Meaning = 1
- Property_ATOM_LINK Property_Meaning = 2
- Property_ATOM_TITLE Property_Meaning = 3
- Property_ATOM_CONTENT Property_Meaning = 4
- Property_ATOM_SUMMARY Property_Meaning = 5
- Property_ATOM_AUTHOR Property_Meaning = 6
- Property_GD_WHEN Property_Meaning = 7
- Property_GD_EMAIL Property_Meaning = 8
- Property_GEORSS_POINT Property_Meaning = 9
- Property_GD_IM Property_Meaning = 10
- Property_GD_PHONENUMBER Property_Meaning = 11
- Property_GD_POSTALADDRESS Property_Meaning = 12
- Property_GD_RATING Property_Meaning = 13
- Property_BLOBKEY Property_Meaning = 17
- Property_ENTITY_PROTO Property_Meaning = 19
- Property_INDEX_VALUE Property_Meaning = 18
-)
-
-var Property_Meaning_name = map[int32]string{
- 0: "NO_MEANING",
- 14: "BLOB",
- 15: "TEXT",
- 16: "BYTESTRING",
- 1: "ATOM_CATEGORY",
- 2: "ATOM_LINK",
- 3: "ATOM_TITLE",
- 4: "ATOM_CONTENT",
- 5: "ATOM_SUMMARY",
- 6: "ATOM_AUTHOR",
- 7: "GD_WHEN",
- 8: "GD_EMAIL",
- 9: "GEORSS_POINT",
- 10: "GD_IM",
- 11: "GD_PHONENUMBER",
- 12: "GD_POSTALADDRESS",
- 13: "GD_RATING",
- 17: "BLOBKEY",
- 19: "ENTITY_PROTO",
- 18: "INDEX_VALUE",
-}
-var Property_Meaning_value = map[string]int32{
- "NO_MEANING": 0,
- "BLOB": 14,
- "TEXT": 15,
- "BYTESTRING": 16,
- "ATOM_CATEGORY": 1,
- "ATOM_LINK": 2,
- "ATOM_TITLE": 3,
- "ATOM_CONTENT": 4,
- "ATOM_SUMMARY": 5,
- "ATOM_AUTHOR": 6,
- "GD_WHEN": 7,
- "GD_EMAIL": 8,
- "GEORSS_POINT": 9,
- "GD_IM": 10,
- "GD_PHONENUMBER": 11,
- "GD_POSTALADDRESS": 12,
- "GD_RATING": 13,
- "BLOBKEY": 17,
- "ENTITY_PROTO": 19,
- "INDEX_VALUE": 18,
-}
-
-func (x Property_Meaning) Enum() *Property_Meaning {
- p := new(Property_Meaning)
- *p = x
- return p
-}
-func (x Property_Meaning) String() string {
- return proto.EnumName(Property_Meaning_name, int32(x))
-}
-func (x *Property_Meaning) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning")
- if err != nil {
- return err
- }
- *x = Property_Meaning(value)
- return nil
-}
-func (Property_Meaning) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 0}
-}
-
-type Property_FtsTokenizationOption int32
-
-const (
- Property_HTML Property_FtsTokenizationOption = 1
- Property_ATOM Property_FtsTokenizationOption = 2
-)
-
-var Property_FtsTokenizationOption_name = map[int32]string{
- 1: "HTML",
- 2: "ATOM",
-}
-var Property_FtsTokenizationOption_value = map[string]int32{
- "HTML": 1,
- "ATOM": 2,
-}
-
-func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption {
- p := new(Property_FtsTokenizationOption)
- *p = x
- return p
-}
-func (x Property_FtsTokenizationOption) String() string {
- return proto.EnumName(Property_FtsTokenizationOption_name, int32(x))
-}
-func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption")
- if err != nil {
- return err
- }
- *x = Property_FtsTokenizationOption(value)
- return nil
-}
-func (Property_FtsTokenizationOption) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 1}
-}
-
-type EntityProto_Kind int32
-
-const (
- EntityProto_GD_CONTACT EntityProto_Kind = 1
- EntityProto_GD_EVENT EntityProto_Kind = 2
- EntityProto_GD_MESSAGE EntityProto_Kind = 3
-)
-
-var EntityProto_Kind_name = map[int32]string{
- 1: "GD_CONTACT",
- 2: "GD_EVENT",
- 3: "GD_MESSAGE",
-}
-var EntityProto_Kind_value = map[string]int32{
- "GD_CONTACT": 1,
- "GD_EVENT": 2,
- "GD_MESSAGE": 3,
-}
-
-func (x EntityProto_Kind) Enum() *EntityProto_Kind {
- p := new(EntityProto_Kind)
- *p = x
- return p
-}
-func (x EntityProto_Kind) String() string {
- return proto.EnumName(EntityProto_Kind_name, int32(x))
-}
-func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind")
- if err != nil {
- return err
- }
- *x = EntityProto_Kind(value)
- return nil
-}
-func (EntityProto_Kind) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6, 0}
-}
-
-type Index_Property_Direction int32
-
-const (
- Index_Property_ASCENDING Index_Property_Direction = 1
- Index_Property_DESCENDING Index_Property_Direction = 2
-)
-
-var Index_Property_Direction_name = map[int32]string{
- 1: "ASCENDING",
- 2: "DESCENDING",
-}
-var Index_Property_Direction_value = map[string]int32{
- "ASCENDING": 1,
- "DESCENDING": 2,
-}
-
-func (x Index_Property_Direction) Enum() *Index_Property_Direction {
- p := new(Index_Property_Direction)
- *p = x
- return p
-}
-func (x Index_Property_Direction) String() string {
- return proto.EnumName(Index_Property_Direction_name, int32(x))
-}
-func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction")
- if err != nil {
- return err
- }
- *x = Index_Property_Direction(value)
- return nil
-}
-func (Index_Property_Direction) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0, 0}
-}
-
-type CompositeIndex_State int32
-
-const (
- CompositeIndex_WRITE_ONLY CompositeIndex_State = 1
- CompositeIndex_READ_WRITE CompositeIndex_State = 2
- CompositeIndex_DELETED CompositeIndex_State = 3
- CompositeIndex_ERROR CompositeIndex_State = 4
-)
-
-var CompositeIndex_State_name = map[int32]string{
- 1: "WRITE_ONLY",
- 2: "READ_WRITE",
- 3: "DELETED",
- 4: "ERROR",
-}
-var CompositeIndex_State_value = map[string]int32{
- "WRITE_ONLY": 1,
- "READ_WRITE": 2,
- "DELETED": 3,
- "ERROR": 4,
-}
-
-func (x CompositeIndex_State) Enum() *CompositeIndex_State {
- p := new(CompositeIndex_State)
- *p = x
- return p
-}
-func (x CompositeIndex_State) String() string {
- return proto.EnumName(CompositeIndex_State_name, int32(x))
-}
-func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State")
- if err != nil {
- return err
- }
- *x = CompositeIndex_State(value)
- return nil
-}
-func (CompositeIndex_State) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9, 0}
-}
-
-type Snapshot_Status int32
-
-const (
- Snapshot_INACTIVE Snapshot_Status = 0
- Snapshot_ACTIVE Snapshot_Status = 1
-)
-
-var Snapshot_Status_name = map[int32]string{
- 0: "INACTIVE",
- 1: "ACTIVE",
-}
-var Snapshot_Status_value = map[string]int32{
- "INACTIVE": 0,
- "ACTIVE": 1,
-}
-
-func (x Snapshot_Status) Enum() *Snapshot_Status {
- p := new(Snapshot_Status)
- *p = x
- return p
-}
-func (x Snapshot_Status) String() string {
- return proto.EnumName(Snapshot_Status_name, int32(x))
-}
-func (x *Snapshot_Status) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status")
- if err != nil {
- return err
- }
- *x = Snapshot_Status(value)
- return nil
-}
-func (Snapshot_Status) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12, 0}
-}
-
-type Query_Hint int32
-
-const (
- Query_ORDER_FIRST Query_Hint = 1
- Query_ANCESTOR_FIRST Query_Hint = 2
- Query_FILTER_FIRST Query_Hint = 3
-)
-
-var Query_Hint_name = map[int32]string{
- 1: "ORDER_FIRST",
- 2: "ANCESTOR_FIRST",
- 3: "FILTER_FIRST",
-}
-var Query_Hint_value = map[string]int32{
- "ORDER_FIRST": 1,
- "ANCESTOR_FIRST": 2,
- "FILTER_FIRST": 3,
-}
-
-func (x Query_Hint) Enum() *Query_Hint {
- p := new(Query_Hint)
- *p = x
- return p
-}
-func (x Query_Hint) String() string {
- return proto.EnumName(Query_Hint_name, int32(x))
-}
-func (x *Query_Hint) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint")
- if err != nil {
- return err
- }
- *x = Query_Hint(value)
- return nil
-}
-func (Query_Hint) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0}
-}
-
-type Query_Filter_Operator int32
-
-const (
- Query_Filter_LESS_THAN Query_Filter_Operator = 1
- Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2
- Query_Filter_GREATER_THAN Query_Filter_Operator = 3
- Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4
- Query_Filter_EQUAL Query_Filter_Operator = 5
- Query_Filter_IN Query_Filter_Operator = 6
- Query_Filter_EXISTS Query_Filter_Operator = 7
-)
-
-var Query_Filter_Operator_name = map[int32]string{
- 1: "LESS_THAN",
- 2: "LESS_THAN_OR_EQUAL",
- 3: "GREATER_THAN",
- 4: "GREATER_THAN_OR_EQUAL",
- 5: "EQUAL",
- 6: "IN",
- 7: "EXISTS",
-}
-var Query_Filter_Operator_value = map[string]int32{
- "LESS_THAN": 1,
- "LESS_THAN_OR_EQUAL": 2,
- "GREATER_THAN": 3,
- "GREATER_THAN_OR_EQUAL": 4,
- "EQUAL": 5,
- "IN": 6,
- "EXISTS": 7,
-}
-
-func (x Query_Filter_Operator) Enum() *Query_Filter_Operator {
- p := new(Query_Filter_Operator)
- *p = x
- return p
-}
-func (x Query_Filter_Operator) String() string {
- return proto.EnumName(Query_Filter_Operator_name, int32(x))
-}
-func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator")
- if err != nil {
- return err
- }
- *x = Query_Filter_Operator(value)
- return nil
-}
-func (Query_Filter_Operator) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0, 0}
-}
-
-type Query_Order_Direction int32
-
-const (
- Query_Order_ASCENDING Query_Order_Direction = 1
- Query_Order_DESCENDING Query_Order_Direction = 2
-)
-
-var Query_Order_Direction_name = map[int32]string{
- 1: "ASCENDING",
- 2: "DESCENDING",
-}
-var Query_Order_Direction_value = map[string]int32{
- "ASCENDING": 1,
- "DESCENDING": 2,
-}
-
-func (x Query_Order_Direction) Enum() *Query_Order_Direction {
- p := new(Query_Order_Direction)
- *p = x
- return p
-}
-func (x Query_Order_Direction) String() string {
- return proto.EnumName(Query_Order_Direction_name, int32(x))
-}
-func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction")
- if err != nil {
- return err
- }
- *x = Query_Order_Direction(value)
- return nil
-}
-func (Query_Order_Direction) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1, 0}
-}
-
-type Error_ErrorCode int32
-
-const (
- Error_BAD_REQUEST Error_ErrorCode = 1
- Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2
- Error_INTERNAL_ERROR Error_ErrorCode = 3
- Error_NEED_INDEX Error_ErrorCode = 4
- Error_TIMEOUT Error_ErrorCode = 5
- Error_PERMISSION_DENIED Error_ErrorCode = 6
- Error_BIGTABLE_ERROR Error_ErrorCode = 7
- Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8
- Error_CAPABILITY_DISABLED Error_ErrorCode = 9
- Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10
- Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11
-)
-
-var Error_ErrorCode_name = map[int32]string{
- 1: "BAD_REQUEST",
- 2: "CONCURRENT_TRANSACTION",
- 3: "INTERNAL_ERROR",
- 4: "NEED_INDEX",
- 5: "TIMEOUT",
- 6: "PERMISSION_DENIED",
- 7: "BIGTABLE_ERROR",
- 8: "COMMITTED_BUT_STILL_APPLYING",
- 9: "CAPABILITY_DISABLED",
- 10: "TRY_ALTERNATE_BACKEND",
- 11: "SAFE_TIME_TOO_OLD",
-}
-var Error_ErrorCode_value = map[string]int32{
- "BAD_REQUEST": 1,
- "CONCURRENT_TRANSACTION": 2,
- "INTERNAL_ERROR": 3,
- "NEED_INDEX": 4,
- "TIMEOUT": 5,
- "PERMISSION_DENIED": 6,
- "BIGTABLE_ERROR": 7,
- "COMMITTED_BUT_STILL_APPLYING": 8,
- "CAPABILITY_DISABLED": 9,
- "TRY_ALTERNATE_BACKEND": 10,
- "SAFE_TIME_TOO_OLD": 11,
-}
-
-func (x Error_ErrorCode) Enum() *Error_ErrorCode {
- p := new(Error_ErrorCode)
- *p = x
- return p
-}
-func (x Error_ErrorCode) String() string {
- return proto.EnumName(Error_ErrorCode_name, int32(x))
-}
-func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode")
- if err != nil {
- return err
- }
- *x = Error_ErrorCode(value)
- return nil
-}
-func (Error_ErrorCode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19, 0}
-}
-
-type PutRequest_AutoIdPolicy int32
-
-const (
- PutRequest_CURRENT PutRequest_AutoIdPolicy = 0
- PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1
-)
-
-var PutRequest_AutoIdPolicy_name = map[int32]string{
- 0: "CURRENT",
- 1: "SEQUENTIAL",
-}
-var PutRequest_AutoIdPolicy_value = map[string]int32{
- "CURRENT": 0,
- "SEQUENTIAL": 1,
-}
-
-func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy {
- p := new(PutRequest_AutoIdPolicy)
- *p = x
- return p
-}
-func (x PutRequest_AutoIdPolicy) String() string {
- return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x))
-}
-func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy")
- if err != nil {
- return err
- }
- *x = PutRequest_AutoIdPolicy(value)
- return nil
-}
-func (PutRequest_AutoIdPolicy) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23, 0}
-}
-
-type BeginTransactionRequest_TransactionMode int32
-
-const (
- BeginTransactionRequest_UNKNOWN BeginTransactionRequest_TransactionMode = 0
- BeginTransactionRequest_READ_ONLY BeginTransactionRequest_TransactionMode = 1
- BeginTransactionRequest_READ_WRITE BeginTransactionRequest_TransactionMode = 2
-)
-
-var BeginTransactionRequest_TransactionMode_name = map[int32]string{
- 0: "UNKNOWN",
- 1: "READ_ONLY",
- 2: "READ_WRITE",
-}
-var BeginTransactionRequest_TransactionMode_value = map[string]int32{
- "UNKNOWN": 0,
- "READ_ONLY": 1,
- "READ_WRITE": 2,
-}
-
-func (x BeginTransactionRequest_TransactionMode) Enum() *BeginTransactionRequest_TransactionMode {
- p := new(BeginTransactionRequest_TransactionMode)
- *p = x
- return p
-}
-func (x BeginTransactionRequest_TransactionMode) String() string {
- return proto.EnumName(BeginTransactionRequest_TransactionMode_name, int32(x))
-}
-func (x *BeginTransactionRequest_TransactionMode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_TransactionMode_value, data, "BeginTransactionRequest_TransactionMode")
- if err != nil {
- return err
- }
- *x = BeginTransactionRequest_TransactionMode(value)
- return nil
-}
-func (BeginTransactionRequest_TransactionMode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36, 0}
-}
-
-type Action struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Action) Reset() { *m = Action{} }
-func (m *Action) String() string { return proto.CompactTextString(m) }
-func (*Action) ProtoMessage() {}
-func (*Action) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{0}
-}
-func (m *Action) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Action.Unmarshal(m, b)
-}
-func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Action.Marshal(b, m, deterministic)
-}
-func (dst *Action) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Action.Merge(dst, src)
-}
-func (m *Action) XXX_Size() int {
- return xxx_messageInfo_Action.Size(m)
-}
-func (m *Action) XXX_DiscardUnknown() {
- xxx_messageInfo_Action.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Action proto.InternalMessageInfo
-
-type PropertyValue struct {
- Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"`
- BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"`
- StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"`
- DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"`
- Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue,json=pointvalue" json:"pointvalue,omitempty"`
- Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue,json=uservalue" json:"uservalue,omitempty"`
- Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue,json=referencevalue" json:"referencevalue,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PropertyValue) Reset() { *m = PropertyValue{} }
-func (m *PropertyValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue) ProtoMessage() {}
-func (*PropertyValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1}
-}
-func (m *PropertyValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PropertyValue.Unmarshal(m, b)
-}
-func (m *PropertyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PropertyValue.Marshal(b, m, deterministic)
-}
-func (dst *PropertyValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PropertyValue.Merge(dst, src)
-}
-func (m *PropertyValue) XXX_Size() int {
- return xxx_messageInfo_PropertyValue.Size(m)
-}
-func (m *PropertyValue) XXX_DiscardUnknown() {
- xxx_messageInfo_PropertyValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PropertyValue proto.InternalMessageInfo
-
-func (m *PropertyValue) GetInt64Value() int64 {
- if m != nil && m.Int64Value != nil {
- return *m.Int64Value
- }
- return 0
-}
-
-func (m *PropertyValue) GetBooleanValue() bool {
- if m != nil && m.BooleanValue != nil {
- return *m.BooleanValue
- }
- return false
-}
-
-func (m *PropertyValue) GetStringValue() string {
- if m != nil && m.StringValue != nil {
- return *m.StringValue
- }
- return ""
-}
-
-func (m *PropertyValue) GetDoubleValue() float64 {
- if m != nil && m.DoubleValue != nil {
- return *m.DoubleValue
- }
- return 0
-}
-
-func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue {
- if m != nil {
- return m.Pointvalue
- }
- return nil
-}
-
-func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue {
- if m != nil {
- return m.Uservalue
- }
- return nil
-}
-
-func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue {
- if m != nil {
- return m.Referencevalue
- }
- return nil
-}
-
-type PropertyValue_PointValue struct {
- X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"`
- Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} }
-func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_PointValue) ProtoMessage() {}
-func (*PropertyValue_PointValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 0}
-}
-func (m *PropertyValue_PointValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PropertyValue_PointValue.Unmarshal(m, b)
-}
-func (m *PropertyValue_PointValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PropertyValue_PointValue.Marshal(b, m, deterministic)
-}
-func (dst *PropertyValue_PointValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PropertyValue_PointValue.Merge(dst, src)
-}
-func (m *PropertyValue_PointValue) XXX_Size() int {
- return xxx_messageInfo_PropertyValue_PointValue.Size(m)
-}
-func (m *PropertyValue_PointValue) XXX_DiscardUnknown() {
- xxx_messageInfo_PropertyValue_PointValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PropertyValue_PointValue proto.InternalMessageInfo
-
-func (m *PropertyValue_PointValue) GetX() float64 {
- if m != nil && m.X != nil {
- return *m.X
- }
- return 0
-}
-
-func (m *PropertyValue_PointValue) GetY() float64 {
- if m != nil && m.Y != nil {
- return *m.Y
- }
- return 0
-}
-
-type PropertyValue_UserValue struct {
- Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"`
- AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"`
- Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"`
- FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"`
- FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} }
-func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_UserValue) ProtoMessage() {}
-func (*PropertyValue_UserValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 1}
-}
-func (m *PropertyValue_UserValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PropertyValue_UserValue.Unmarshal(m, b)
-}
-func (m *PropertyValue_UserValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PropertyValue_UserValue.Marshal(b, m, deterministic)
-}
-func (dst *PropertyValue_UserValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PropertyValue_UserValue.Merge(dst, src)
-}
-func (m *PropertyValue_UserValue) XXX_Size() int {
- return xxx_messageInfo_PropertyValue_UserValue.Size(m)
-}
-func (m *PropertyValue_UserValue) XXX_DiscardUnknown() {
- xxx_messageInfo_PropertyValue_UserValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PropertyValue_UserValue proto.InternalMessageInfo
-
-func (m *PropertyValue_UserValue) GetEmail() string {
- if m != nil && m.Email != nil {
- return *m.Email
- }
- return ""
-}
-
-func (m *PropertyValue_UserValue) GetAuthDomain() string {
- if m != nil && m.AuthDomain != nil {
- return *m.AuthDomain
- }
- return ""
-}
-
-func (m *PropertyValue_UserValue) GetNickname() string {
- if m != nil && m.Nickname != nil {
- return *m.Nickname
- }
- return ""
-}
-
-func (m *PropertyValue_UserValue) GetFederatedIdentity() string {
- if m != nil && m.FederatedIdentity != nil {
- return *m.FederatedIdentity
- }
- return ""
-}
-
-func (m *PropertyValue_UserValue) GetFederatedProvider() string {
- if m != nil && m.FederatedProvider != nil {
- return *m.FederatedProvider
- }
- return ""
-}
-
-type PropertyValue_ReferenceValue struct {
- App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
- NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
- Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement,json=pathelement" json:"pathelement,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} }
-func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_ReferenceValue) ProtoMessage() {}
-func (*PropertyValue_ReferenceValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2}
-}
-func (m *PropertyValue_ReferenceValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PropertyValue_ReferenceValue.Unmarshal(m, b)
-}
-func (m *PropertyValue_ReferenceValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PropertyValue_ReferenceValue.Marshal(b, m, deterministic)
-}
-func (dst *PropertyValue_ReferenceValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PropertyValue_ReferenceValue.Merge(dst, src)
-}
-func (m *PropertyValue_ReferenceValue) XXX_Size() int {
- return xxx_messageInfo_PropertyValue_ReferenceValue.Size(m)
-}
-func (m *PropertyValue_ReferenceValue) XXX_DiscardUnknown() {
- xxx_messageInfo_PropertyValue_ReferenceValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PropertyValue_ReferenceValue proto.InternalMessageInfo
-
-func (m *PropertyValue_ReferenceValue) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-func (m *PropertyValue_ReferenceValue) GetNameSpace() string {
- if m != nil && m.NameSpace != nil {
- return *m.NameSpace
- }
- return ""
-}
-
-func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement {
- if m != nil {
- return m.Pathelement
- }
- return nil
-}
-
-type PropertyValue_ReferenceValue_PathElement struct {
- Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"`
- Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"`
- Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PropertyValue_ReferenceValue_PathElement) Reset() {
- *m = PropertyValue_ReferenceValue_PathElement{}
-}
-func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {}
-func (*PropertyValue_ReferenceValue_PathElement) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2, 0}
-}
-func (m *PropertyValue_ReferenceValue_PathElement) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Unmarshal(m, b)
-}
-func (m *PropertyValue_ReferenceValue_PathElement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Marshal(b, m, deterministic)
-}
-func (dst *PropertyValue_ReferenceValue_PathElement) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Merge(dst, src)
-}
-func (m *PropertyValue_ReferenceValue_PathElement) XXX_Size() int {
- return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Size(m)
-}
-func (m *PropertyValue_ReferenceValue_PathElement) XXX_DiscardUnknown() {
- xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PropertyValue_ReferenceValue_PathElement proto.InternalMessageInfo
-
-func (m *PropertyValue_ReferenceValue_PathElement) GetType() string {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return ""
-}
-
-func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 {
- if m != nil && m.Id != nil {
- return *m.Id
- }
- return 0
-}
-
-func (m *PropertyValue_ReferenceValue_PathElement) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-type Property struct {
- Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"`
- MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri,json=meaningUri" json:"meaning_uri,omitempty"`
- Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
- Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"`
- Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"`
- Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"`
- FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,json=ftsTokenizationOption,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"`
- Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Property) Reset() { *m = Property{} }
-func (m *Property) String() string { return proto.CompactTextString(m) }
-func (*Property) ProtoMessage() {}
-func (*Property) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2}
-}
-func (m *Property) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Property.Unmarshal(m, b)
-}
-func (m *Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Property.Marshal(b, m, deterministic)
-}
-func (dst *Property) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Property.Merge(dst, src)
-}
-func (m *Property) XXX_Size() int {
- return xxx_messageInfo_Property.Size(m)
-}
-func (m *Property) XXX_DiscardUnknown() {
- xxx_messageInfo_Property.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Property proto.InternalMessageInfo
-
-const Default_Property_Meaning Property_Meaning = Property_NO_MEANING
-const Default_Property_Searchable bool = false
-const Default_Property_Locale string = "en"
-
-func (m *Property) GetMeaning() Property_Meaning {
- if m != nil && m.Meaning != nil {
- return *m.Meaning
- }
- return Default_Property_Meaning
-}
-
-func (m *Property) GetMeaningUri() string {
- if m != nil && m.MeaningUri != nil {
- return *m.MeaningUri
- }
- return ""
-}
-
-func (m *Property) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *Property) GetValue() *PropertyValue {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *Property) GetMultiple() bool {
- if m != nil && m.Multiple != nil {
- return *m.Multiple
- }
- return false
-}
-
-func (m *Property) GetSearchable() bool {
- if m != nil && m.Searchable != nil {
- return *m.Searchable
- }
- return Default_Property_Searchable
-}
-
-func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption {
- if m != nil && m.FtsTokenizationOption != nil {
- return *m.FtsTokenizationOption
- }
- return Property_HTML
-}
-
-func (m *Property) GetLocale() string {
- if m != nil && m.Locale != nil {
- return *m.Locale
- }
- return Default_Property_Locale
-}
-
-type Path struct {
- Element []*Path_Element `protobuf:"group,1,rep,name=Element,json=element" json:"element,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Path) Reset() { *m = Path{} }
-func (m *Path) String() string { return proto.CompactTextString(m) }
-func (*Path) ProtoMessage() {}
-func (*Path) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3}
-}
-func (m *Path) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Path.Unmarshal(m, b)
-}
-func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Path.Marshal(b, m, deterministic)
-}
-func (dst *Path) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Path.Merge(dst, src)
-}
-func (m *Path) XXX_Size() int {
- return xxx_messageInfo_Path.Size(m)
-}
-func (m *Path) XXX_DiscardUnknown() {
- xxx_messageInfo_Path.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Path proto.InternalMessageInfo
-
-func (m *Path) GetElement() []*Path_Element {
- if m != nil {
- return m.Element
- }
- return nil
-}
-
-type Path_Element struct {
- Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"`
- Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"`
- Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Path_Element) Reset() { *m = Path_Element{} }
-func (m *Path_Element) String() string { return proto.CompactTextString(m) }
-func (*Path_Element) ProtoMessage() {}
-func (*Path_Element) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3, 0}
-}
-func (m *Path_Element) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Path_Element.Unmarshal(m, b)
-}
-func (m *Path_Element) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Path_Element.Marshal(b, m, deterministic)
-}
-func (dst *Path_Element) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Path_Element.Merge(dst, src)
-}
-func (m *Path_Element) XXX_Size() int {
- return xxx_messageInfo_Path_Element.Size(m)
-}
-func (m *Path_Element) XXX_DiscardUnknown() {
- xxx_messageInfo_Path_Element.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Path_Element proto.InternalMessageInfo
-
-func (m *Path_Element) GetType() string {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return ""
-}
-
-func (m *Path_Element) GetId() int64 {
- if m != nil && m.Id != nil {
- return *m.Id
- }
- return 0
-}
-
-func (m *Path_Element) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-type Reference struct {
- App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
- NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
- Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Reference) Reset() { *m = Reference{} }
-func (m *Reference) String() string { return proto.CompactTextString(m) }
-func (*Reference) ProtoMessage() {}
-func (*Reference) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{4}
-}
-func (m *Reference) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Reference.Unmarshal(m, b)
-}
-func (m *Reference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Reference.Marshal(b, m, deterministic)
-}
-func (dst *Reference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Reference.Merge(dst, src)
-}
-func (m *Reference) XXX_Size() int {
- return xxx_messageInfo_Reference.Size(m)
-}
-func (m *Reference) XXX_DiscardUnknown() {
- xxx_messageInfo_Reference.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Reference proto.InternalMessageInfo
-
-func (m *Reference) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-func (m *Reference) GetNameSpace() string {
- if m != nil && m.NameSpace != nil {
- return *m.NameSpace
- }
- return ""
-}
-
-func (m *Reference) GetPath() *Path {
- if m != nil {
- return m.Path
- }
- return nil
-}
-
-type User struct {
- Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
- AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"`
- Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"`
- FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"`
- FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *User) Reset() { *m = User{} }
-func (m *User) String() string { return proto.CompactTextString(m) }
-func (*User) ProtoMessage() {}
-func (*User) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{5}
-}
-func (m *User) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_User.Unmarshal(m, b)
-}
-func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_User.Marshal(b, m, deterministic)
-}
-func (dst *User) XXX_Merge(src proto.Message) {
- xxx_messageInfo_User.Merge(dst, src)
-}
-func (m *User) XXX_Size() int {
- return xxx_messageInfo_User.Size(m)
-}
-func (m *User) XXX_DiscardUnknown() {
- xxx_messageInfo_User.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_User proto.InternalMessageInfo
-
-func (m *User) GetEmail() string {
- if m != nil && m.Email != nil {
- return *m.Email
- }
- return ""
-}
-
-func (m *User) GetAuthDomain() string {
- if m != nil && m.AuthDomain != nil {
- return *m.AuthDomain
- }
- return ""
-}
-
-func (m *User) GetNickname() string {
- if m != nil && m.Nickname != nil {
- return *m.Nickname
- }
- return ""
-}
-
-func (m *User) GetFederatedIdentity() string {
- if m != nil && m.FederatedIdentity != nil {
- return *m.FederatedIdentity
- }
- return ""
-}
-
-func (m *User) GetFederatedProvider() string {
- if m != nil && m.FederatedProvider != nil {
- return *m.FederatedProvider
- }
- return ""
-}
-
-type EntityProto struct {
- Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"`
- EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group,json=entityGroup" json:"entity_group,omitempty"`
- Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"`
- Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"`
- KindUri *string `protobuf:"bytes,5,opt,name=kind_uri,json=kindUri" json:"kind_uri,omitempty"`
- Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
- RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property,json=rawProperty" json:"raw_property,omitempty"`
- Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EntityProto) Reset() { *m = EntityProto{} }
-func (m *EntityProto) String() string { return proto.CompactTextString(m) }
-func (*EntityProto) ProtoMessage() {}
-func (*EntityProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6}
-}
-func (m *EntityProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_EntityProto.Unmarshal(m, b)
-}
-func (m *EntityProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_EntityProto.Marshal(b, m, deterministic)
-}
-func (dst *EntityProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EntityProto.Merge(dst, src)
-}
-func (m *EntityProto) XXX_Size() int {
- return xxx_messageInfo_EntityProto.Size(m)
-}
-func (m *EntityProto) XXX_DiscardUnknown() {
- xxx_messageInfo_EntityProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EntityProto proto.InternalMessageInfo
-
-func (m *EntityProto) GetKey() *Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *EntityProto) GetEntityGroup() *Path {
- if m != nil {
- return m.EntityGroup
- }
- return nil
-}
-
-func (m *EntityProto) GetOwner() *User {
- if m != nil {
- return m.Owner
- }
- return nil
-}
-
-func (m *EntityProto) GetKind() EntityProto_Kind {
- if m != nil && m.Kind != nil {
- return *m.Kind
- }
- return EntityProto_GD_CONTACT
-}
-
-func (m *EntityProto) GetKindUri() string {
- if m != nil && m.KindUri != nil {
- return *m.KindUri
- }
- return ""
-}
-
-func (m *EntityProto) GetProperty() []*Property {
- if m != nil {
- return m.Property
- }
- return nil
-}
-
-func (m *EntityProto) GetRawProperty() []*Property {
- if m != nil {
- return m.RawProperty
- }
- return nil
-}
-
-func (m *EntityProto) GetRank() int32 {
- if m != nil && m.Rank != nil {
- return *m.Rank
- }
- return 0
-}
-
-type CompositeProperty struct {
- IndexId *int64 `protobuf:"varint,1,req,name=index_id,json=indexId" json:"index_id,omitempty"`
- Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompositeProperty) Reset() { *m = CompositeProperty{} }
-func (m *CompositeProperty) String() string { return proto.CompactTextString(m) }
-func (*CompositeProperty) ProtoMessage() {}
-func (*CompositeProperty) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{7}
-}
-func (m *CompositeProperty) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompositeProperty.Unmarshal(m, b)
-}
-func (m *CompositeProperty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompositeProperty.Marshal(b, m, deterministic)
-}
-func (dst *CompositeProperty) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompositeProperty.Merge(dst, src)
-}
-func (m *CompositeProperty) XXX_Size() int {
- return xxx_messageInfo_CompositeProperty.Size(m)
-}
-func (m *CompositeProperty) XXX_DiscardUnknown() {
- xxx_messageInfo_CompositeProperty.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompositeProperty proto.InternalMessageInfo
-
-func (m *CompositeProperty) GetIndexId() int64 {
- if m != nil && m.IndexId != nil {
- return *m.IndexId
- }
- return 0
-}
-
-func (m *CompositeProperty) GetValue() []string {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-type Index struct {
- EntityType *string `protobuf:"bytes,1,req,name=entity_type,json=entityType" json:"entity_type,omitempty"`
- Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"`
- Property []*Index_Property `protobuf:"group,2,rep,name=Property,json=property" json:"property,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Index) Reset() { *m = Index{} }
-func (m *Index) String() string { return proto.CompactTextString(m) }
-func (*Index) ProtoMessage() {}
-func (*Index) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8}
-}
-func (m *Index) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Index.Unmarshal(m, b)
-}
-func (m *Index) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Index.Marshal(b, m, deterministic)
-}
-func (dst *Index) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Index.Merge(dst, src)
-}
-func (m *Index) XXX_Size() int {
- return xxx_messageInfo_Index.Size(m)
-}
-func (m *Index) XXX_DiscardUnknown() {
- xxx_messageInfo_Index.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Index proto.InternalMessageInfo
-
-func (m *Index) GetEntityType() string {
- if m != nil && m.EntityType != nil {
- return *m.EntityType
- }
- return ""
-}
-
-func (m *Index) GetAncestor() bool {
- if m != nil && m.Ancestor != nil {
- return *m.Ancestor
- }
- return false
-}
-
-func (m *Index) GetProperty() []*Index_Property {
- if m != nil {
- return m.Property
- }
- return nil
-}
-
-type Index_Property struct {
- Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
- Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Index_Property) Reset() { *m = Index_Property{} }
-func (m *Index_Property) String() string { return proto.CompactTextString(m) }
-func (*Index_Property) ProtoMessage() {}
-func (*Index_Property) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0}
-}
-func (m *Index_Property) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Index_Property.Unmarshal(m, b)
-}
-func (m *Index_Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Index_Property.Marshal(b, m, deterministic)
-}
-func (dst *Index_Property) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Index_Property.Merge(dst, src)
-}
-func (m *Index_Property) XXX_Size() int {
- return xxx_messageInfo_Index_Property.Size(m)
-}
-func (m *Index_Property) XXX_DiscardUnknown() {
- xxx_messageInfo_Index_Property.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Index_Property proto.InternalMessageInfo
-
-const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING
-
-func (m *Index_Property) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *Index_Property) GetDirection() Index_Property_Direction {
- if m != nil && m.Direction != nil {
- return *m.Direction
- }
- return Default_Index_Property_Direction
-}
-
-type CompositeIndex struct {
- AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
- Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
- Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"`
- State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"`
- OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,json=onlyUseIfRequired,def=0" json:"only_use_if_required,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompositeIndex) Reset() { *m = CompositeIndex{} }
-func (m *CompositeIndex) String() string { return proto.CompactTextString(m) }
-func (*CompositeIndex) ProtoMessage() {}
-func (*CompositeIndex) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9}
-}
-func (m *CompositeIndex) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompositeIndex.Unmarshal(m, b)
-}
-func (m *CompositeIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompositeIndex.Marshal(b, m, deterministic)
-}
-func (dst *CompositeIndex) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompositeIndex.Merge(dst, src)
-}
-func (m *CompositeIndex) XXX_Size() int {
- return xxx_messageInfo_CompositeIndex.Size(m)
-}
-func (m *CompositeIndex) XXX_DiscardUnknown() {
- xxx_messageInfo_CompositeIndex.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompositeIndex proto.InternalMessageInfo
-
-const Default_CompositeIndex_OnlyUseIfRequired bool = false
-
-func (m *CompositeIndex) GetAppId() string {
- if m != nil && m.AppId != nil {
- return *m.AppId
- }
- return ""
-}
-
-func (m *CompositeIndex) GetId() int64 {
- if m != nil && m.Id != nil {
- return *m.Id
- }
- return 0
-}
-
-func (m *CompositeIndex) GetDefinition() *Index {
- if m != nil {
- return m.Definition
- }
- return nil
-}
-
-func (m *CompositeIndex) GetState() CompositeIndex_State {
- if m != nil && m.State != nil {
- return *m.State
- }
- return CompositeIndex_WRITE_ONLY
-}
-
-func (m *CompositeIndex) GetOnlyUseIfRequired() bool {
- if m != nil && m.OnlyUseIfRequired != nil {
- return *m.OnlyUseIfRequired
- }
- return Default_CompositeIndex_OnlyUseIfRequired
-}
-
-type IndexPostfix struct {
- IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value,json=indexValue" json:"index_value,omitempty"`
- Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
- Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *IndexPostfix) Reset() { *m = IndexPostfix{} }
-func (m *IndexPostfix) String() string { return proto.CompactTextString(m) }
-func (*IndexPostfix) ProtoMessage() {}
-func (*IndexPostfix) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10}
-}
-func (m *IndexPostfix) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_IndexPostfix.Unmarshal(m, b)
-}
-func (m *IndexPostfix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_IndexPostfix.Marshal(b, m, deterministic)
-}
-func (dst *IndexPostfix) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IndexPostfix.Merge(dst, src)
-}
-func (m *IndexPostfix) XXX_Size() int {
- return xxx_messageInfo_IndexPostfix.Size(m)
-}
-func (m *IndexPostfix) XXX_DiscardUnknown() {
- xxx_messageInfo_IndexPostfix.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IndexPostfix proto.InternalMessageInfo
-
-const Default_IndexPostfix_Before bool = true
-
-func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue {
- if m != nil {
- return m.IndexValue
- }
- return nil
-}
-
-func (m *IndexPostfix) GetKey() *Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *IndexPostfix) GetBefore() bool {
- if m != nil && m.Before != nil {
- return *m.Before
- }
- return Default_IndexPostfix_Before
-}
-
-type IndexPostfix_IndexValue struct {
- PropertyName *string `protobuf:"bytes,1,req,name=property_name,json=propertyName" json:"property_name,omitempty"`
- Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} }
-func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) }
-func (*IndexPostfix_IndexValue) ProtoMessage() {}
-func (*IndexPostfix_IndexValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10, 0}
-}
-func (m *IndexPostfix_IndexValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_IndexPostfix_IndexValue.Unmarshal(m, b)
-}
-func (m *IndexPostfix_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_IndexPostfix_IndexValue.Marshal(b, m, deterministic)
-}
-func (dst *IndexPostfix_IndexValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IndexPostfix_IndexValue.Merge(dst, src)
-}
-func (m *IndexPostfix_IndexValue) XXX_Size() int {
- return xxx_messageInfo_IndexPostfix_IndexValue.Size(m)
-}
-func (m *IndexPostfix_IndexValue) XXX_DiscardUnknown() {
- xxx_messageInfo_IndexPostfix_IndexValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IndexPostfix_IndexValue proto.InternalMessageInfo
-
-func (m *IndexPostfix_IndexValue) GetPropertyName() string {
- if m != nil && m.PropertyName != nil {
- return *m.PropertyName
- }
- return ""
-}
-
-func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-type IndexPosition struct {
- Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
- Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *IndexPosition) Reset() { *m = IndexPosition{} }
-func (m *IndexPosition) String() string { return proto.CompactTextString(m) }
-func (*IndexPosition) ProtoMessage() {}
-func (*IndexPosition) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{11}
-}
-func (m *IndexPosition) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_IndexPosition.Unmarshal(m, b)
-}
-func (m *IndexPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_IndexPosition.Marshal(b, m, deterministic)
-}
-func (dst *IndexPosition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IndexPosition.Merge(dst, src)
-}
-func (m *IndexPosition) XXX_Size() int {
- return xxx_messageInfo_IndexPosition.Size(m)
-}
-func (m *IndexPosition) XXX_DiscardUnknown() {
- xxx_messageInfo_IndexPosition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IndexPosition proto.InternalMessageInfo
-
-const Default_IndexPosition_Before bool = true
-
-func (m *IndexPosition) GetKey() string {
- if m != nil && m.Key != nil {
- return *m.Key
- }
- return ""
-}
-
-func (m *IndexPosition) GetBefore() bool {
- if m != nil && m.Before != nil {
- return *m.Before
- }
- return Default_IndexPosition_Before
-}
-
-type Snapshot struct {
- Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Snapshot) Reset() { *m = Snapshot{} }
-func (m *Snapshot) String() string { return proto.CompactTextString(m) }
-func (*Snapshot) ProtoMessage() {}
-func (*Snapshot) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12}
-}
-func (m *Snapshot) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Snapshot.Unmarshal(m, b)
-}
-func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
-}
-func (dst *Snapshot) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Snapshot.Merge(dst, src)
-}
-func (m *Snapshot) XXX_Size() int {
- return xxx_messageInfo_Snapshot.Size(m)
-}
-func (m *Snapshot) XXX_DiscardUnknown() {
- xxx_messageInfo_Snapshot.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Snapshot proto.InternalMessageInfo
-
-func (m *Snapshot) GetTs() int64 {
- if m != nil && m.Ts != nil {
- return *m.Ts
- }
- return 0
-}
-
-type InternalHeader struct {
- Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *InternalHeader) Reset() { *m = InternalHeader{} }
-func (m *InternalHeader) String() string { return proto.CompactTextString(m) }
-func (*InternalHeader) ProtoMessage() {}
-func (*InternalHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{13}
-}
-func (m *InternalHeader) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_InternalHeader.Unmarshal(m, b)
-}
-func (m *InternalHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_InternalHeader.Marshal(b, m, deterministic)
-}
-func (dst *InternalHeader) XXX_Merge(src proto.Message) {
- xxx_messageInfo_InternalHeader.Merge(dst, src)
-}
-func (m *InternalHeader) XXX_Size() int {
- return xxx_messageInfo_InternalHeader.Size(m)
-}
-func (m *InternalHeader) XXX_DiscardUnknown() {
- xxx_messageInfo_InternalHeader.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InternalHeader proto.InternalMessageInfo
-
-func (m *InternalHeader) GetQos() string {
- if m != nil && m.Qos != nil {
- return *m.Qos
- }
- return ""
-}
-
-type Transaction struct {
- Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
- Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"`
- App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"`
- MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Transaction) Reset() { *m = Transaction{} }
-func (m *Transaction) String() string { return proto.CompactTextString(m) }
-func (*Transaction) ProtoMessage() {}
-func (*Transaction) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{14}
-}
-func (m *Transaction) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Transaction.Unmarshal(m, b)
-}
-func (m *Transaction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Transaction.Marshal(b, m, deterministic)
-}
-func (dst *Transaction) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Transaction.Merge(dst, src)
-}
-func (m *Transaction) XXX_Size() int {
- return xxx_messageInfo_Transaction.Size(m)
-}
-func (m *Transaction) XXX_DiscardUnknown() {
- xxx_messageInfo_Transaction.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Transaction proto.InternalMessageInfo
-
-const Default_Transaction_MarkChanges bool = false
-
-func (m *Transaction) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *Transaction) GetHandle() uint64 {
- if m != nil && m.Handle != nil {
- return *m.Handle
- }
- return 0
-}
-
-func (m *Transaction) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-func (m *Transaction) GetMarkChanges() bool {
- if m != nil && m.MarkChanges != nil {
- return *m.MarkChanges
- }
- return Default_Transaction_MarkChanges
-}
-
-type Query struct {
- Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"`
- App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
- NameSpace *string `protobuf:"bytes,29,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
- Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"`
- Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"`
- Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter,json=filter" json:"filter,omitempty"`
- SearchQuery *string `protobuf:"bytes,8,opt,name=search_query,json=searchQuery" json:"search_query,omitempty"`
- Order []*Query_Order `protobuf:"group,9,rep,name=Order,json=order" json:"order,omitempty"`
- Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"`
- Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"`
- Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"`
- Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"`
- CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"`
- EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor,json=endCompiledCursor" json:"end_compiled_cursor,omitempty"`
- CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
- RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,json=requirePerfectPlan,def=0" json:"require_perfect_plan,omitempty"`
- KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,json=keysOnly,def=0" json:"keys_only,omitempty"`
- Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"`
- Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"`
- FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"`
- Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"`
- PropertyName []string `protobuf:"bytes,33,rep,name=property_name,json=propertyName" json:"property_name,omitempty"`
- GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name,json=groupByPropertyName" json:"group_by_property_name,omitempty"`
- Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"`
- MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds,json=minSafeTimeSeconds" json:"min_safe_time_seconds,omitempty"`
- SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name,json=safeReplicaName" json:"safe_replica_name,omitempty"`
- PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,json=persistOffset,def=0" json:"persist_offset,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Query) Reset() { *m = Query{} }
-func (m *Query) String() string { return proto.CompactTextString(m) }
-func (*Query) ProtoMessage() {}
-func (*Query) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15}
-}
-func (m *Query) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Query.Unmarshal(m, b)
-}
-func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Query.Marshal(b, m, deterministic)
-}
-func (dst *Query) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Query.Merge(dst, src)
-}
-func (m *Query) XXX_Size() int {
- return xxx_messageInfo_Query.Size(m)
-}
-func (m *Query) XXX_DiscardUnknown() {
- xxx_messageInfo_Query.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Query proto.InternalMessageInfo
-
-const Default_Query_Offset int32 = 0
-const Default_Query_RequirePerfectPlan bool = false
-const Default_Query_KeysOnly bool = false
-const Default_Query_Compile bool = false
-const Default_Query_PersistOffset bool = false
-
-func (m *Query) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *Query) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-func (m *Query) GetNameSpace() string {
- if m != nil && m.NameSpace != nil {
- return *m.NameSpace
- }
- return ""
-}
-
-func (m *Query) GetKind() string {
- if m != nil && m.Kind != nil {
- return *m.Kind
- }
- return ""
-}
-
-func (m *Query) GetAncestor() *Reference {
- if m != nil {
- return m.Ancestor
- }
- return nil
-}
-
-func (m *Query) GetFilter() []*Query_Filter {
- if m != nil {
- return m.Filter
- }
- return nil
-}
-
-func (m *Query) GetSearchQuery() string {
- if m != nil && m.SearchQuery != nil {
- return *m.SearchQuery
- }
- return ""
-}
-
-func (m *Query) GetOrder() []*Query_Order {
- if m != nil {
- return m.Order
- }
- return nil
-}
-
-func (m *Query) GetHint() Query_Hint {
- if m != nil && m.Hint != nil {
- return *m.Hint
- }
- return Query_ORDER_FIRST
-}
-
-func (m *Query) GetCount() int32 {
- if m != nil && m.Count != nil {
- return *m.Count
- }
- return 0
-}
-
-func (m *Query) GetOffset() int32 {
- if m != nil && m.Offset != nil {
- return *m.Offset
- }
- return Default_Query_Offset
-}
-
-func (m *Query) GetLimit() int32 {
- if m != nil && m.Limit != nil {
- return *m.Limit
- }
- return 0
-}
-
-func (m *Query) GetCompiledCursor() *CompiledCursor {
- if m != nil {
- return m.CompiledCursor
- }
- return nil
-}
-
-func (m *Query) GetEndCompiledCursor() *CompiledCursor {
- if m != nil {
- return m.EndCompiledCursor
- }
- return nil
-}
-
-func (m *Query) GetCompositeIndex() []*CompositeIndex {
- if m != nil {
- return m.CompositeIndex
- }
- return nil
-}
-
-func (m *Query) GetRequirePerfectPlan() bool {
- if m != nil && m.RequirePerfectPlan != nil {
- return *m.RequirePerfectPlan
- }
- return Default_Query_RequirePerfectPlan
-}
-
-func (m *Query) GetKeysOnly() bool {
- if m != nil && m.KeysOnly != nil {
- return *m.KeysOnly
- }
- return Default_Query_KeysOnly
-}
-
-func (m *Query) GetTransaction() *Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (m *Query) GetCompile() bool {
- if m != nil && m.Compile != nil {
- return *m.Compile
- }
- return Default_Query_Compile
-}
-
-func (m *Query) GetFailoverMs() int64 {
- if m != nil && m.FailoverMs != nil {
- return *m.FailoverMs
- }
- return 0
-}
-
-func (m *Query) GetStrong() bool {
- if m != nil && m.Strong != nil {
- return *m.Strong
- }
- return false
-}
-
-func (m *Query) GetPropertyName() []string {
- if m != nil {
- return m.PropertyName
- }
- return nil
-}
-
-func (m *Query) GetGroupByPropertyName() []string {
- if m != nil {
- return m.GroupByPropertyName
- }
- return nil
-}
-
-func (m *Query) GetDistinct() bool {
- if m != nil && m.Distinct != nil {
- return *m.Distinct
- }
- return false
-}
-
-func (m *Query) GetMinSafeTimeSeconds() int64 {
- if m != nil && m.MinSafeTimeSeconds != nil {
- return *m.MinSafeTimeSeconds
- }
- return 0
-}
-
-func (m *Query) GetSafeReplicaName() []string {
- if m != nil {
- return m.SafeReplicaName
- }
- return nil
-}
-
-func (m *Query) GetPersistOffset() bool {
- if m != nil && m.PersistOffset != nil {
- return *m.PersistOffset
- }
- return Default_Query_PersistOffset
-}
-
-type Query_Filter struct {
- Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"`
- Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Query_Filter) Reset() { *m = Query_Filter{} }
-func (m *Query_Filter) String() string { return proto.CompactTextString(m) }
-func (*Query_Filter) ProtoMessage() {}
-func (*Query_Filter) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0}
-}
-func (m *Query_Filter) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Query_Filter.Unmarshal(m, b)
-}
-func (m *Query_Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Query_Filter.Marshal(b, m, deterministic)
-}
-func (dst *Query_Filter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Query_Filter.Merge(dst, src)
-}
-func (m *Query_Filter) XXX_Size() int {
- return xxx_messageInfo_Query_Filter.Size(m)
-}
-func (m *Query_Filter) XXX_DiscardUnknown() {
- xxx_messageInfo_Query_Filter.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Query_Filter proto.InternalMessageInfo
-
-func (m *Query_Filter) GetOp() Query_Filter_Operator {
- if m != nil && m.Op != nil {
- return *m.Op
- }
- return Query_Filter_LESS_THAN
-}
-
-func (m *Query_Filter) GetProperty() []*Property {
- if m != nil {
- return m.Property
- }
- return nil
-}
-
-type Query_Order struct {
- Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"`
- Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Query_Order) Reset() { *m = Query_Order{} }
-func (m *Query_Order) String() string { return proto.CompactTextString(m) }
-func (*Query_Order) ProtoMessage() {}
-func (*Query_Order) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1}
-}
-func (m *Query_Order) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Query_Order.Unmarshal(m, b)
-}
-func (m *Query_Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Query_Order.Marshal(b, m, deterministic)
-}
-func (dst *Query_Order) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Query_Order.Merge(dst, src)
-}
-func (m *Query_Order) XXX_Size() int {
- return xxx_messageInfo_Query_Order.Size(m)
-}
-func (m *Query_Order) XXX_DiscardUnknown() {
- xxx_messageInfo_Query_Order.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Query_Order proto.InternalMessageInfo
-
-const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING
-
-func (m *Query_Order) GetProperty() string {
- if m != nil && m.Property != nil {
- return *m.Property
- }
- return ""
-}
-
-func (m *Query_Order) GetDirection() Query_Order_Direction {
- if m != nil && m.Direction != nil {
- return *m.Direction
- }
- return Default_Query_Order_Direction
-}
-
-type CompiledQuery struct {
- Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan,json=primaryscan" json:"primaryscan,omitempty"`
- Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan,json=mergejoinscan" json:"mergejoinscan,omitempty"`
- IndexDef *Index `protobuf:"bytes,21,opt,name=index_def,json=indexDef" json:"index_def,omitempty"`
- Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
- Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
- KeysOnly *bool `protobuf:"varint,12,req,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
- PropertyName []string `protobuf:"bytes,24,rep,name=property_name,json=propertyName" json:"property_name,omitempty"`
- DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size,json=distinctInfixSize" json:"distinct_infix_size,omitempty"`
- Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter,json=entityfilter" json:"entityfilter,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledQuery) Reset() { *m = CompiledQuery{} }
-func (m *CompiledQuery) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery) ProtoMessage() {}
-func (*CompiledQuery) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16}
-}
-func (m *CompiledQuery) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledQuery.Unmarshal(m, b)
-}
-func (m *CompiledQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledQuery.Marshal(b, m, deterministic)
-}
-func (dst *CompiledQuery) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledQuery.Merge(dst, src)
-}
-func (m *CompiledQuery) XXX_Size() int {
- return xxx_messageInfo_CompiledQuery.Size(m)
-}
-func (m *CompiledQuery) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledQuery.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledQuery proto.InternalMessageInfo
-
-const Default_CompiledQuery_Offset int32 = 0
-
-func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan {
- if m != nil {
- return m.Primaryscan
- }
- return nil
-}
-
-func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan {
- if m != nil {
- return m.Mergejoinscan
- }
- return nil
-}
-
-func (m *CompiledQuery) GetIndexDef() *Index {
- if m != nil {
- return m.IndexDef
- }
- return nil
-}
-
-func (m *CompiledQuery) GetOffset() int32 {
- if m != nil && m.Offset != nil {
- return *m.Offset
- }
- return Default_CompiledQuery_Offset
-}
-
-func (m *CompiledQuery) GetLimit() int32 {
- if m != nil && m.Limit != nil {
- return *m.Limit
- }
- return 0
-}
-
-func (m *CompiledQuery) GetKeysOnly() bool {
- if m != nil && m.KeysOnly != nil {
- return *m.KeysOnly
- }
- return false
-}
-
-func (m *CompiledQuery) GetPropertyName() []string {
- if m != nil {
- return m.PropertyName
- }
- return nil
-}
-
-func (m *CompiledQuery) GetDistinctInfixSize() int32 {
- if m != nil && m.DistinctInfixSize != nil {
- return *m.DistinctInfixSize
- }
- return 0
-}
-
-func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter {
- if m != nil {
- return m.Entityfilter
- }
- return nil
-}
-
-type CompiledQuery_PrimaryScan struct {
- IndexName *string `protobuf:"bytes,2,opt,name=index_name,json=indexName" json:"index_name,omitempty"`
- StartKey *string `protobuf:"bytes,3,opt,name=start_key,json=startKey" json:"start_key,omitempty"`
- StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive,json=startInclusive" json:"start_inclusive,omitempty"`
- EndKey *string `protobuf:"bytes,5,opt,name=end_key,json=endKey" json:"end_key,omitempty"`
- EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive,json=endInclusive" json:"end_inclusive,omitempty"`
- StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value,json=startPostfixValue" json:"start_postfix_value,omitempty"`
- EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value,json=endPostfixValue" json:"end_postfix_value,omitempty"`
- EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us,json=endUnappliedLogTimestampUs" json:"end_unapplied_log_timestamp_us,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} }
-func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery_PrimaryScan) ProtoMessage() {}
-func (*CompiledQuery_PrimaryScan) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 0}
-}
-func (m *CompiledQuery_PrimaryScan) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledQuery_PrimaryScan.Unmarshal(m, b)
-}
-func (m *CompiledQuery_PrimaryScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledQuery_PrimaryScan.Marshal(b, m, deterministic)
-}
-func (dst *CompiledQuery_PrimaryScan) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledQuery_PrimaryScan.Merge(dst, src)
-}
-func (m *CompiledQuery_PrimaryScan) XXX_Size() int {
- return xxx_messageInfo_CompiledQuery_PrimaryScan.Size(m)
-}
-func (m *CompiledQuery_PrimaryScan) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledQuery_PrimaryScan.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledQuery_PrimaryScan proto.InternalMessageInfo
-
-func (m *CompiledQuery_PrimaryScan) GetIndexName() string {
- if m != nil && m.IndexName != nil {
- return *m.IndexName
- }
- return ""
-}
-
-func (m *CompiledQuery_PrimaryScan) GetStartKey() string {
- if m != nil && m.StartKey != nil {
- return *m.StartKey
- }
- return ""
-}
-
-func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool {
- if m != nil && m.StartInclusive != nil {
- return *m.StartInclusive
- }
- return false
-}
-
-func (m *CompiledQuery_PrimaryScan) GetEndKey() string {
- if m != nil && m.EndKey != nil {
- return *m.EndKey
- }
- return ""
-}
-
-func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool {
- if m != nil && m.EndInclusive != nil {
- return *m.EndInclusive
- }
- return false
-}
-
-func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string {
- if m != nil {
- return m.StartPostfixValue
- }
- return nil
-}
-
-func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string {
- if m != nil {
- return m.EndPostfixValue
- }
- return nil
-}
-
-func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 {
- if m != nil && m.EndUnappliedLogTimestampUs != nil {
- return *m.EndUnappliedLogTimestampUs
- }
- return 0
-}
-
-type CompiledQuery_MergeJoinScan struct {
- IndexName *string `protobuf:"bytes,8,req,name=index_name,json=indexName" json:"index_name,omitempty"`
- PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value,json=prefixValue" json:"prefix_value,omitempty"`
- ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,json=valuePrefix,def=0" json:"value_prefix,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} }
-func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery_MergeJoinScan) ProtoMessage() {}
-func (*CompiledQuery_MergeJoinScan) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 1}
-}
-func (m *CompiledQuery_MergeJoinScan) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledQuery_MergeJoinScan.Unmarshal(m, b)
-}
-func (m *CompiledQuery_MergeJoinScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledQuery_MergeJoinScan.Marshal(b, m, deterministic)
-}
-func (dst *CompiledQuery_MergeJoinScan) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledQuery_MergeJoinScan.Merge(dst, src)
-}
-func (m *CompiledQuery_MergeJoinScan) XXX_Size() int {
- return xxx_messageInfo_CompiledQuery_MergeJoinScan.Size(m)
-}
-func (m *CompiledQuery_MergeJoinScan) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledQuery_MergeJoinScan.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledQuery_MergeJoinScan proto.InternalMessageInfo
-
-const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false
-
-func (m *CompiledQuery_MergeJoinScan) GetIndexName() string {
- if m != nil && m.IndexName != nil {
- return *m.IndexName
- }
- return ""
-}
-
-func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string {
- if m != nil {
- return m.PrefixValue
- }
- return nil
-}
-
-func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool {
- if m != nil && m.ValuePrefix != nil {
- return *m.ValuePrefix
- }
- return Default_CompiledQuery_MergeJoinScan_ValuePrefix
-}
-
-type CompiledQuery_EntityFilter struct {
- Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"`
- Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"`
- Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} }
-func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery_EntityFilter) ProtoMessage() {}
-func (*CompiledQuery_EntityFilter) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 2}
-}
-func (m *CompiledQuery_EntityFilter) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledQuery_EntityFilter.Unmarshal(m, b)
-}
-func (m *CompiledQuery_EntityFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledQuery_EntityFilter.Marshal(b, m, deterministic)
-}
-func (dst *CompiledQuery_EntityFilter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledQuery_EntityFilter.Merge(dst, src)
-}
-func (m *CompiledQuery_EntityFilter) XXX_Size() int {
- return xxx_messageInfo_CompiledQuery_EntityFilter.Size(m)
-}
-func (m *CompiledQuery_EntityFilter) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledQuery_EntityFilter.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledQuery_EntityFilter proto.InternalMessageInfo
-
-const Default_CompiledQuery_EntityFilter_Distinct bool = false
-
-func (m *CompiledQuery_EntityFilter) GetDistinct() bool {
- if m != nil && m.Distinct != nil {
- return *m.Distinct
- }
- return Default_CompiledQuery_EntityFilter_Distinct
-}
-
-func (m *CompiledQuery_EntityFilter) GetKind() string {
- if m != nil && m.Kind != nil {
- return *m.Kind
- }
- return ""
-}
-
-func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference {
- if m != nil {
- return m.Ancestor
- }
- return nil
-}
-
-type CompiledCursor struct {
- Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position,json=position" json:"position,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledCursor) Reset() { *m = CompiledCursor{} }
-func (m *CompiledCursor) String() string { return proto.CompactTextString(m) }
-func (*CompiledCursor) ProtoMessage() {}
-func (*CompiledCursor) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17}
-}
-func (m *CompiledCursor) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledCursor.Unmarshal(m, b)
-}
-func (m *CompiledCursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledCursor.Marshal(b, m, deterministic)
-}
-func (dst *CompiledCursor) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledCursor.Merge(dst, src)
-}
-func (m *CompiledCursor) XXX_Size() int {
- return xxx_messageInfo_CompiledCursor.Size(m)
-}
-func (m *CompiledCursor) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledCursor.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledCursor proto.InternalMessageInfo
-
-func (m *CompiledCursor) GetPosition() *CompiledCursor_Position {
- if m != nil {
- return m.Position
- }
- return nil
-}
-
-type CompiledCursor_Position struct {
- StartKey *string `protobuf:"bytes,27,opt,name=start_key,json=startKey" json:"start_key,omitempty"`
- Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue,json=indexvalue" json:"indexvalue,omitempty"`
- Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"`
- StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,json=startInclusive,def=1" json:"start_inclusive,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} }
-func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) }
-func (*CompiledCursor_Position) ProtoMessage() {}
-func (*CompiledCursor_Position) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0}
-}
-func (m *CompiledCursor_Position) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledCursor_Position.Unmarshal(m, b)
-}
-func (m *CompiledCursor_Position) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledCursor_Position.Marshal(b, m, deterministic)
-}
-func (dst *CompiledCursor_Position) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledCursor_Position.Merge(dst, src)
-}
-func (m *CompiledCursor_Position) XXX_Size() int {
- return xxx_messageInfo_CompiledCursor_Position.Size(m)
-}
-func (m *CompiledCursor_Position) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledCursor_Position.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledCursor_Position proto.InternalMessageInfo
-
-const Default_CompiledCursor_Position_StartInclusive bool = true
-
-func (m *CompiledCursor_Position) GetStartKey() string {
- if m != nil && m.StartKey != nil {
- return *m.StartKey
- }
- return ""
-}
-
-func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue {
- if m != nil {
- return m.Indexvalue
- }
- return nil
-}
-
-func (m *CompiledCursor_Position) GetKey() *Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *CompiledCursor_Position) GetStartInclusive() bool {
- if m != nil && m.StartInclusive != nil {
- return *m.StartInclusive
- }
- return Default_CompiledCursor_Position_StartInclusive
-}
-
-type CompiledCursor_Position_IndexValue struct {
- Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"`
- Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} }
-func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) }
-func (*CompiledCursor_Position_IndexValue) ProtoMessage() {}
-func (*CompiledCursor_Position_IndexValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0, 0}
-}
-func (m *CompiledCursor_Position_IndexValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledCursor_Position_IndexValue.Unmarshal(m, b)
-}
-func (m *CompiledCursor_Position_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledCursor_Position_IndexValue.Marshal(b, m, deterministic)
-}
-func (dst *CompiledCursor_Position_IndexValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledCursor_Position_IndexValue.Merge(dst, src)
-}
-func (m *CompiledCursor_Position_IndexValue) XXX_Size() int {
- return xxx_messageInfo_CompiledCursor_Position_IndexValue.Size(m)
-}
-func (m *CompiledCursor_Position_IndexValue) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledCursor_Position_IndexValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledCursor_Position_IndexValue proto.InternalMessageInfo
-
-func (m *CompiledCursor_Position_IndexValue) GetProperty() string {
- if m != nil && m.Property != nil {
- return *m.Property
- }
- return ""
-}
-
-func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-type Cursor struct {
- Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"`
- App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Cursor) Reset() { *m = Cursor{} }
-func (m *Cursor) String() string { return proto.CompactTextString(m) }
-func (*Cursor) ProtoMessage() {}
-func (*Cursor) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{18}
-}
-func (m *Cursor) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Cursor.Unmarshal(m, b)
-}
-func (m *Cursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Cursor.Marshal(b, m, deterministic)
-}
-func (dst *Cursor) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Cursor.Merge(dst, src)
-}
-func (m *Cursor) XXX_Size() int {
- return xxx_messageInfo_Cursor.Size(m)
-}
-func (m *Cursor) XXX_DiscardUnknown() {
- xxx_messageInfo_Cursor.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Cursor proto.InternalMessageInfo
-
-func (m *Cursor) GetCursor() uint64 {
- if m != nil && m.Cursor != nil {
- return *m.Cursor
- }
- return 0
-}
-
-func (m *Cursor) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-type Error struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Error) Reset() { *m = Error{} }
-func (m *Error) String() string { return proto.CompactTextString(m) }
-func (*Error) ProtoMessage() {}
-func (*Error) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19}
-}
-func (m *Error) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Error.Unmarshal(m, b)
-}
-func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Error.Marshal(b, m, deterministic)
-}
-func (dst *Error) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Error.Merge(dst, src)
-}
-func (m *Error) XXX_Size() int {
- return xxx_messageInfo_Error.Size(m)
-}
-func (m *Error) XXX_DiscardUnknown() {
- xxx_messageInfo_Error.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Error proto.InternalMessageInfo
-
-type Cost struct {
- IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes,json=indexWrites" json:"index_writes,omitempty"`
- IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes,json=indexWriteBytes" json:"index_write_bytes,omitempty"`
- EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes,json=entityWrites" json:"entity_writes,omitempty"`
- EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes,json=entityWriteBytes" json:"entity_write_bytes,omitempty"`
- Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost,json=commitcost" json:"commitcost,omitempty"`
- ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta,json=approximateStorageDelta" json:"approximate_storage_delta,omitempty"`
- IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates,json=idSequenceUpdates" json:"id_sequence_updates,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Cost) Reset() { *m = Cost{} }
-func (m *Cost) String() string { return proto.CompactTextString(m) }
-func (*Cost) ProtoMessage() {}
-func (*Cost) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20}
-}
-func (m *Cost) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Cost.Unmarshal(m, b)
-}
-func (m *Cost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Cost.Marshal(b, m, deterministic)
-}
-func (dst *Cost) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Cost.Merge(dst, src)
-}
-func (m *Cost) XXX_Size() int {
- return xxx_messageInfo_Cost.Size(m)
-}
-func (m *Cost) XXX_DiscardUnknown() {
- xxx_messageInfo_Cost.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Cost proto.InternalMessageInfo
-
-func (m *Cost) GetIndexWrites() int32 {
- if m != nil && m.IndexWrites != nil {
- return *m.IndexWrites
- }
- return 0
-}
-
-func (m *Cost) GetIndexWriteBytes() int32 {
- if m != nil && m.IndexWriteBytes != nil {
- return *m.IndexWriteBytes
- }
- return 0
-}
-
-func (m *Cost) GetEntityWrites() int32 {
- if m != nil && m.EntityWrites != nil {
- return *m.EntityWrites
- }
- return 0
-}
-
-func (m *Cost) GetEntityWriteBytes() int32 {
- if m != nil && m.EntityWriteBytes != nil {
- return *m.EntityWriteBytes
- }
- return 0
-}
-
-func (m *Cost) GetCommitcost() *Cost_CommitCost {
- if m != nil {
- return m.Commitcost
- }
- return nil
-}
-
-func (m *Cost) GetApproximateStorageDelta() int32 {
- if m != nil && m.ApproximateStorageDelta != nil {
- return *m.ApproximateStorageDelta
- }
- return 0
-}
-
-func (m *Cost) GetIdSequenceUpdates() int32 {
- if m != nil && m.IdSequenceUpdates != nil {
- return *m.IdSequenceUpdates
- }
- return 0
-}
-
-type Cost_CommitCost struct {
- RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts,json=requestedEntityPuts" json:"requested_entity_puts,omitempty"`
- RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes,json=requestedEntityDeletes" json:"requested_entity_deletes,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} }
-func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) }
-func (*Cost_CommitCost) ProtoMessage() {}
-func (*Cost_CommitCost) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20, 0}
-}
-func (m *Cost_CommitCost) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Cost_CommitCost.Unmarshal(m, b)
-}
-func (m *Cost_CommitCost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Cost_CommitCost.Marshal(b, m, deterministic)
-}
-func (dst *Cost_CommitCost) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Cost_CommitCost.Merge(dst, src)
-}
-func (m *Cost_CommitCost) XXX_Size() int {
- return xxx_messageInfo_Cost_CommitCost.Size(m)
-}
-func (m *Cost_CommitCost) XXX_DiscardUnknown() {
- xxx_messageInfo_Cost_CommitCost.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Cost_CommitCost proto.InternalMessageInfo
-
-func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 {
- if m != nil && m.RequestedEntityPuts != nil {
- return *m.RequestedEntityPuts
- }
- return 0
-}
-
-func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 {
- if m != nil && m.RequestedEntityDeletes != nil {
- return *m.RequestedEntityDeletes
- }
- return 0
-}
-
-type GetRequest struct {
- Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"`
- Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
- Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
- FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"`
- Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"`
- AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,json=allowDeferred,def=0" json:"allow_deferred,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetRequest) Reset() { *m = GetRequest{} }
-func (m *GetRequest) String() string { return proto.CompactTextString(m) }
-func (*GetRequest) ProtoMessage() {}
-func (*GetRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{21}
-}
-func (m *GetRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetRequest.Unmarshal(m, b)
-}
-func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic)
-}
-func (dst *GetRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetRequest.Merge(dst, src)
-}
-func (m *GetRequest) XXX_Size() int {
- return xxx_messageInfo_GetRequest.Size(m)
-}
-func (m *GetRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_GetRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetRequest proto.InternalMessageInfo
-
-const Default_GetRequest_AllowDeferred bool = false
-
-func (m *GetRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *GetRequest) GetKey() []*Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *GetRequest) GetTransaction() *Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (m *GetRequest) GetFailoverMs() int64 {
- if m != nil && m.FailoverMs != nil {
- return *m.FailoverMs
- }
- return 0
-}
-
-func (m *GetRequest) GetStrong() bool {
- if m != nil && m.Strong != nil {
- return *m.Strong
- }
- return false
-}
-
-func (m *GetRequest) GetAllowDeferred() bool {
- if m != nil && m.AllowDeferred != nil {
- return *m.AllowDeferred
- }
- return Default_GetRequest_AllowDeferred
-}
-
-type GetResponse struct {
- Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity,json=entity" json:"entity,omitempty"`
- Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"`
- InOrder *bool `protobuf:"varint,6,opt,name=in_order,json=inOrder,def=1" json:"in_order,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetResponse) Reset() { *m = GetResponse{} }
-func (m *GetResponse) String() string { return proto.CompactTextString(m) }
-func (*GetResponse) ProtoMessage() {}
-func (*GetResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22}
-}
-func (m *GetResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetResponse.Unmarshal(m, b)
-}
-func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic)
-}
-func (dst *GetResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetResponse.Merge(dst, src)
-}
-func (m *GetResponse) XXX_Size() int {
- return xxx_messageInfo_GetResponse.Size(m)
-}
-func (m *GetResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_GetResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetResponse proto.InternalMessageInfo
-
-const Default_GetResponse_InOrder bool = true
-
-func (m *GetResponse) GetEntity() []*GetResponse_Entity {
- if m != nil {
- return m.Entity
- }
- return nil
-}
-
-func (m *GetResponse) GetDeferred() []*Reference {
- if m != nil {
- return m.Deferred
- }
- return nil
-}
-
-func (m *GetResponse) GetInOrder() bool {
- if m != nil && m.InOrder != nil {
- return *m.InOrder
- }
- return Default_GetResponse_InOrder
-}
-
-type GetResponse_Entity struct {
- Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"`
- Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"`
- Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} }
-func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) }
-func (*GetResponse_Entity) ProtoMessage() {}
-func (*GetResponse_Entity) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22, 0}
-}
-func (m *GetResponse_Entity) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetResponse_Entity.Unmarshal(m, b)
-}
-func (m *GetResponse_Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetResponse_Entity.Marshal(b, m, deterministic)
-}
-func (dst *GetResponse_Entity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetResponse_Entity.Merge(dst, src)
-}
-func (m *GetResponse_Entity) XXX_Size() int {
- return xxx_messageInfo_GetResponse_Entity.Size(m)
-}
-func (m *GetResponse_Entity) XXX_DiscardUnknown() {
- xxx_messageInfo_GetResponse_Entity.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetResponse_Entity proto.InternalMessageInfo
-
-func (m *GetResponse_Entity) GetEntity() *EntityProto {
- if m != nil {
- return m.Entity
- }
- return nil
-}
-
-func (m *GetResponse_Entity) GetKey() *Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *GetResponse_Entity) GetVersion() int64 {
- if m != nil && m.Version != nil {
- return *m.Version
- }
- return 0
-}
-
-type PutRequest struct {
- Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"`
- Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"`
- Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
- CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
- Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
- Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
- MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
- Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
- AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,json=autoIdPolicy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PutRequest) Reset() { *m = PutRequest{} }
-func (m *PutRequest) String() string { return proto.CompactTextString(m) }
-func (*PutRequest) ProtoMessage() {}
-func (*PutRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23}
-}
-func (m *PutRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PutRequest.Unmarshal(m, b)
-}
-func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic)
-}
-func (dst *PutRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PutRequest.Merge(dst, src)
-}
-func (m *PutRequest) XXX_Size() int {
- return xxx_messageInfo_PutRequest.Size(m)
-}
-func (m *PutRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_PutRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PutRequest proto.InternalMessageInfo
-
-const Default_PutRequest_Trusted bool = false
-const Default_PutRequest_Force bool = false
-const Default_PutRequest_MarkChanges bool = false
-const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT
-
-func (m *PutRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *PutRequest) GetEntity() []*EntityProto {
- if m != nil {
- return m.Entity
- }
- return nil
-}
-
-func (m *PutRequest) GetTransaction() *Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (m *PutRequest) GetCompositeIndex() []*CompositeIndex {
- if m != nil {
- return m.CompositeIndex
- }
- return nil
-}
-
-func (m *PutRequest) GetTrusted() bool {
- if m != nil && m.Trusted != nil {
- return *m.Trusted
- }
- return Default_PutRequest_Trusted
-}
-
-func (m *PutRequest) GetForce() bool {
- if m != nil && m.Force != nil {
- return *m.Force
- }
- return Default_PutRequest_Force
-}
-
-func (m *PutRequest) GetMarkChanges() bool {
- if m != nil && m.MarkChanges != nil {
- return *m.MarkChanges
- }
- return Default_PutRequest_MarkChanges
-}
-
-func (m *PutRequest) GetSnapshot() []*Snapshot {
- if m != nil {
- return m.Snapshot
- }
- return nil
-}
-
-func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy {
- if m != nil && m.AutoIdPolicy != nil {
- return *m.AutoIdPolicy
- }
- return Default_PutRequest_AutoIdPolicy
-}
-
-type PutResponse struct {
- Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
- Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"`
- Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PutResponse) Reset() { *m = PutResponse{} }
-func (m *PutResponse) String() string { return proto.CompactTextString(m) }
-func (*PutResponse) ProtoMessage() {}
-func (*PutResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{24}
-}
-func (m *PutResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PutResponse.Unmarshal(m, b)
-}
-func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic)
-}
-func (dst *PutResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PutResponse.Merge(dst, src)
-}
-func (m *PutResponse) XXX_Size() int {
- return xxx_messageInfo_PutResponse.Size(m)
-}
-func (m *PutResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_PutResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PutResponse proto.InternalMessageInfo
-
-func (m *PutResponse) GetKey() []*Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *PutResponse) GetCost() *Cost {
- if m != nil {
- return m.Cost
- }
- return nil
-}
-
-func (m *PutResponse) GetVersion() []int64 {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-type TouchRequest struct {
- Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
- Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
- CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
- Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"`
- Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TouchRequest) Reset() { *m = TouchRequest{} }
-func (m *TouchRequest) String() string { return proto.CompactTextString(m) }
-func (*TouchRequest) ProtoMessage() {}
-func (*TouchRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{25}
-}
-func (m *TouchRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TouchRequest.Unmarshal(m, b)
-}
-func (m *TouchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TouchRequest.Marshal(b, m, deterministic)
-}
-func (dst *TouchRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TouchRequest.Merge(dst, src)
-}
-func (m *TouchRequest) XXX_Size() int {
- return xxx_messageInfo_TouchRequest.Size(m)
-}
-func (m *TouchRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_TouchRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TouchRequest proto.InternalMessageInfo
-
-const Default_TouchRequest_Force bool = false
-
-func (m *TouchRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *TouchRequest) GetKey() []*Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex {
- if m != nil {
- return m.CompositeIndex
- }
- return nil
-}
-
-func (m *TouchRequest) GetForce() bool {
- if m != nil && m.Force != nil {
- return *m.Force
- }
- return Default_TouchRequest_Force
-}
-
-func (m *TouchRequest) GetSnapshot() []*Snapshot {
- if m != nil {
- return m.Snapshot
- }
- return nil
-}
-
-type TouchResponse struct {
- Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TouchResponse) Reset() { *m = TouchResponse{} }
-func (m *TouchResponse) String() string { return proto.CompactTextString(m) }
-func (*TouchResponse) ProtoMessage() {}
-func (*TouchResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{26}
-}
-func (m *TouchResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TouchResponse.Unmarshal(m, b)
-}
-func (m *TouchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TouchResponse.Marshal(b, m, deterministic)
-}
-func (dst *TouchResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TouchResponse.Merge(dst, src)
-}
-func (m *TouchResponse) XXX_Size() int {
- return xxx_messageInfo_TouchResponse.Size(m)
-}
-func (m *TouchResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_TouchResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TouchResponse proto.InternalMessageInfo
-
-func (m *TouchResponse) GetCost() *Cost {
- if m != nil {
- return m.Cost
- }
- return nil
-}
-
-type DeleteRequest struct {
- Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
- Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"`
- Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"`
- Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
- Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
- MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
- Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
-func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*DeleteRequest) ProtoMessage() {}
-func (*DeleteRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{27}
-}
-func (m *DeleteRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DeleteRequest.Unmarshal(m, b)
-}
-func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic)
-}
-func (dst *DeleteRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeleteRequest.Merge(dst, src)
-}
-func (m *DeleteRequest) XXX_Size() int {
- return xxx_messageInfo_DeleteRequest.Size(m)
-}
-func (m *DeleteRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_DeleteRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo
-
-const Default_DeleteRequest_Trusted bool = false
-const Default_DeleteRequest_Force bool = false
-const Default_DeleteRequest_MarkChanges bool = false
-
-func (m *DeleteRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *DeleteRequest) GetKey() []*Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *DeleteRequest) GetTransaction() *Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (m *DeleteRequest) GetTrusted() bool {
- if m != nil && m.Trusted != nil {
- return *m.Trusted
- }
- return Default_DeleteRequest_Trusted
-}
-
-func (m *DeleteRequest) GetForce() bool {
- if m != nil && m.Force != nil {
- return *m.Force
- }
- return Default_DeleteRequest_Force
-}
-
-func (m *DeleteRequest) GetMarkChanges() bool {
- if m != nil && m.MarkChanges != nil {
- return *m.MarkChanges
- }
- return Default_DeleteRequest_MarkChanges
-}
-
-func (m *DeleteRequest) GetSnapshot() []*Snapshot {
- if m != nil {
- return m.Snapshot
- }
- return nil
-}
-
-type DeleteResponse struct {
- Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
- Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DeleteResponse) Reset() { *m = DeleteResponse{} }
-func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*DeleteResponse) ProtoMessage() {}
-func (*DeleteResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{28}
-}
-func (m *DeleteResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DeleteResponse.Unmarshal(m, b)
-}
-func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic)
-}
-func (dst *DeleteResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeleteResponse.Merge(dst, src)
-}
-func (m *DeleteResponse) XXX_Size() int {
- return xxx_messageInfo_DeleteResponse.Size(m)
-}
-func (m *DeleteResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_DeleteResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo
-
-func (m *DeleteResponse) GetCost() *Cost {
- if m != nil {
- return m.Cost
- }
- return nil
-}
-
-func (m *DeleteResponse) GetVersion() []int64 {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-type NextRequest struct {
- Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"`
- Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"`
- Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
- Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"`
- Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *NextRequest) Reset() { *m = NextRequest{} }
-func (m *NextRequest) String() string { return proto.CompactTextString(m) }
-func (*NextRequest) ProtoMessage() {}
-func (*NextRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{29}
-}
-func (m *NextRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_NextRequest.Unmarshal(m, b)
-}
-func (m *NextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_NextRequest.Marshal(b, m, deterministic)
-}
-func (dst *NextRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NextRequest.Merge(dst, src)
-}
-func (m *NextRequest) XXX_Size() int {
- return xxx_messageInfo_NextRequest.Size(m)
-}
-func (m *NextRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_NextRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NextRequest proto.InternalMessageInfo
-
-const Default_NextRequest_Offset int32 = 0
-const Default_NextRequest_Compile bool = false
-
-func (m *NextRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *NextRequest) GetCursor() *Cursor {
- if m != nil {
- return m.Cursor
- }
- return nil
-}
-
-func (m *NextRequest) GetCount() int32 {
- if m != nil && m.Count != nil {
- return *m.Count
- }
- return 0
-}
-
-func (m *NextRequest) GetOffset() int32 {
- if m != nil && m.Offset != nil {
- return *m.Offset
- }
- return Default_NextRequest_Offset
-}
-
-func (m *NextRequest) GetCompile() bool {
- if m != nil && m.Compile != nil {
- return *m.Compile
- }
- return Default_NextRequest_Compile
-}
-
-type QueryResult struct {
- Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"`
- Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"`
- SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results,json=skippedResults" json:"skipped_results,omitempty"`
- MoreResults *bool `protobuf:"varint,3,req,name=more_results,json=moreResults" json:"more_results,omitempty"`
- KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
- IndexOnly *bool `protobuf:"varint,9,opt,name=index_only,json=indexOnly" json:"index_only,omitempty"`
- SmallOps *bool `protobuf:"varint,10,opt,name=small_ops,json=smallOps" json:"small_ops,omitempty"`
- CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query,json=compiledQuery" json:"compiled_query,omitempty"`
- CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"`
- Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"`
- Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *QueryResult) Reset() { *m = QueryResult{} }
-func (m *QueryResult) String() string { return proto.CompactTextString(m) }
-func (*QueryResult) ProtoMessage() {}
-func (*QueryResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{30}
-}
-func (m *QueryResult) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_QueryResult.Unmarshal(m, b)
-}
-func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic)
-}
-func (dst *QueryResult) XXX_Merge(src proto.Message) {
- xxx_messageInfo_QueryResult.Merge(dst, src)
-}
-func (m *QueryResult) XXX_Size() int {
- return xxx_messageInfo_QueryResult.Size(m)
-}
-func (m *QueryResult) XXX_DiscardUnknown() {
- xxx_messageInfo_QueryResult.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_QueryResult proto.InternalMessageInfo
-
-func (m *QueryResult) GetCursor() *Cursor {
- if m != nil {
- return m.Cursor
- }
- return nil
-}
-
-func (m *QueryResult) GetResult() []*EntityProto {
- if m != nil {
- return m.Result
- }
- return nil
-}
-
-func (m *QueryResult) GetSkippedResults() int32 {
- if m != nil && m.SkippedResults != nil {
- return *m.SkippedResults
- }
- return 0
-}
-
-func (m *QueryResult) GetMoreResults() bool {
- if m != nil && m.MoreResults != nil {
- return *m.MoreResults
- }
- return false
-}
-
-func (m *QueryResult) GetKeysOnly() bool {
- if m != nil && m.KeysOnly != nil {
- return *m.KeysOnly
- }
- return false
-}
-
-func (m *QueryResult) GetIndexOnly() bool {
- if m != nil && m.IndexOnly != nil {
- return *m.IndexOnly
- }
- return false
-}
-
-func (m *QueryResult) GetSmallOps() bool {
- if m != nil && m.SmallOps != nil {
- return *m.SmallOps
- }
- return false
-}
-
-func (m *QueryResult) GetCompiledQuery() *CompiledQuery {
- if m != nil {
- return m.CompiledQuery
- }
- return nil
-}
-
-func (m *QueryResult) GetCompiledCursor() *CompiledCursor {
- if m != nil {
- return m.CompiledCursor
- }
- return nil
-}
-
-func (m *QueryResult) GetIndex() []*CompositeIndex {
- if m != nil {
- return m.Index
- }
- return nil
-}
-
-func (m *QueryResult) GetVersion() []int64 {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-type AllocateIdsRequest struct {
- Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
- ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key,json=modelKey" json:"model_key,omitempty"`
- Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
- Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"`
- Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} }
-func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
-func (*AllocateIdsRequest) ProtoMessage() {}
-func (*AllocateIdsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{31}
-}
-func (m *AllocateIdsRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_AllocateIdsRequest.Unmarshal(m, b)
-}
-func (m *AllocateIdsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_AllocateIdsRequest.Marshal(b, m, deterministic)
-}
-func (dst *AllocateIdsRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AllocateIdsRequest.Merge(dst, src)
-}
-func (m *AllocateIdsRequest) XXX_Size() int {
- return xxx_messageInfo_AllocateIdsRequest.Size(m)
-}
-func (m *AllocateIdsRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AllocateIdsRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AllocateIdsRequest proto.InternalMessageInfo
-
-func (m *AllocateIdsRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AllocateIdsRequest) GetModelKey() *Reference {
- if m != nil {
- return m.ModelKey
- }
- return nil
-}
-
-func (m *AllocateIdsRequest) GetSize() int64 {
- if m != nil && m.Size != nil {
- return *m.Size
- }
- return 0
-}
-
-func (m *AllocateIdsRequest) GetMax() int64 {
- if m != nil && m.Max != nil {
- return *m.Max
- }
- return 0
-}
-
-func (m *AllocateIdsRequest) GetReserve() []*Reference {
- if m != nil {
- return m.Reserve
- }
- return nil
-}
-
-type AllocateIdsResponse struct {
- Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"`
- End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"`
- Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} }
-func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
-func (*AllocateIdsResponse) ProtoMessage() {}
-func (*AllocateIdsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{32}
-}
-func (m *AllocateIdsResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_AllocateIdsResponse.Unmarshal(m, b)
-}
-func (m *AllocateIdsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_AllocateIdsResponse.Marshal(b, m, deterministic)
-}
-func (dst *AllocateIdsResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AllocateIdsResponse.Merge(dst, src)
-}
-func (m *AllocateIdsResponse) XXX_Size() int {
- return xxx_messageInfo_AllocateIdsResponse.Size(m)
-}
-func (m *AllocateIdsResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AllocateIdsResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AllocateIdsResponse proto.InternalMessageInfo
-
-func (m *AllocateIdsResponse) GetStart() int64 {
- if m != nil && m.Start != nil {
- return *m.Start
- }
- return 0
-}
-
-func (m *AllocateIdsResponse) GetEnd() int64 {
- if m != nil && m.End != nil {
- return *m.End
- }
- return 0
-}
-
-func (m *AllocateIdsResponse) GetCost() *Cost {
- if m != nil {
- return m.Cost
- }
- return nil
-}
-
-type CompositeIndices struct {
- Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompositeIndices) Reset() { *m = CompositeIndices{} }
-func (m *CompositeIndices) String() string { return proto.CompactTextString(m) }
-func (*CompositeIndices) ProtoMessage() {}
-func (*CompositeIndices) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{33}
-}
-func (m *CompositeIndices) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompositeIndices.Unmarshal(m, b)
-}
-func (m *CompositeIndices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompositeIndices.Marshal(b, m, deterministic)
-}
-func (dst *CompositeIndices) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompositeIndices.Merge(dst, src)
-}
-func (m *CompositeIndices) XXX_Size() int {
- return xxx_messageInfo_CompositeIndices.Size(m)
-}
-func (m *CompositeIndices) XXX_DiscardUnknown() {
- xxx_messageInfo_CompositeIndices.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompositeIndices proto.InternalMessageInfo
-
-func (m *CompositeIndices) GetIndex() []*CompositeIndex {
- if m != nil {
- return m.Index
- }
- return nil
-}
-
-type AddActionsRequest struct {
- Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
- Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
- Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} }
-func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) }
-func (*AddActionsRequest) ProtoMessage() {}
-func (*AddActionsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{34}
-}
-func (m *AddActionsRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_AddActionsRequest.Unmarshal(m, b)
-}
-func (m *AddActionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_AddActionsRequest.Marshal(b, m, deterministic)
-}
-func (dst *AddActionsRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AddActionsRequest.Merge(dst, src)
-}
-func (m *AddActionsRequest) XXX_Size() int {
- return xxx_messageInfo_AddActionsRequest.Size(m)
-}
-func (m *AddActionsRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AddActionsRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AddActionsRequest proto.InternalMessageInfo
-
-func (m *AddActionsRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AddActionsRequest) GetTransaction() *Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (m *AddActionsRequest) GetAction() []*Action {
- if m != nil {
- return m.Action
- }
- return nil
-}
-
-type AddActionsResponse struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} }
-func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) }
-func (*AddActionsResponse) ProtoMessage() {}
-func (*AddActionsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{35}
-}
-func (m *AddActionsResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_AddActionsResponse.Unmarshal(m, b)
-}
-func (m *AddActionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_AddActionsResponse.Marshal(b, m, deterministic)
-}
-func (dst *AddActionsResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AddActionsResponse.Merge(dst, src)
-}
-func (m *AddActionsResponse) XXX_Size() int {
- return xxx_messageInfo_AddActionsResponse.Size(m)
-}
-func (m *AddActionsResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AddActionsResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AddActionsResponse proto.InternalMessageInfo
-
-type BeginTransactionRequest struct {
- Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
- App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
- AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,json=allowMultipleEg,def=0" json:"allow_multiple_eg,omitempty"`
- DatabaseId *string `protobuf:"bytes,4,opt,name=database_id,json=databaseId" json:"database_id,omitempty"`
- Mode *BeginTransactionRequest_TransactionMode `protobuf:"varint,5,opt,name=mode,enum=appengine.BeginTransactionRequest_TransactionMode,def=0" json:"mode,omitempty"`
- PreviousTransaction *Transaction `protobuf:"bytes,7,opt,name=previous_transaction,json=previousTransaction" json:"previous_transaction,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} }
-func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
-func (*BeginTransactionRequest) ProtoMessage() {}
-func (*BeginTransactionRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36}
-}
-func (m *BeginTransactionRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_BeginTransactionRequest.Unmarshal(m, b)
-}
-func (m *BeginTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_BeginTransactionRequest.Marshal(b, m, deterministic)
-}
-func (dst *BeginTransactionRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BeginTransactionRequest.Merge(dst, src)
-}
-func (m *BeginTransactionRequest) XXX_Size() int {
- return xxx_messageInfo_BeginTransactionRequest.Size(m)
-}
-func (m *BeginTransactionRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_BeginTransactionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BeginTransactionRequest proto.InternalMessageInfo
-
-const Default_BeginTransactionRequest_AllowMultipleEg bool = false
-const Default_BeginTransactionRequest_Mode BeginTransactionRequest_TransactionMode = BeginTransactionRequest_UNKNOWN
-
-func (m *BeginTransactionRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *BeginTransactionRequest) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-func (m *BeginTransactionRequest) GetAllowMultipleEg() bool {
- if m != nil && m.AllowMultipleEg != nil {
- return *m.AllowMultipleEg
- }
- return Default_BeginTransactionRequest_AllowMultipleEg
-}
-
-func (m *BeginTransactionRequest) GetDatabaseId() string {
- if m != nil && m.DatabaseId != nil {
- return *m.DatabaseId
- }
- return ""
-}
-
-func (m *BeginTransactionRequest) GetMode() BeginTransactionRequest_TransactionMode {
- if m != nil && m.Mode != nil {
- return *m.Mode
- }
- return Default_BeginTransactionRequest_Mode
-}
-
-func (m *BeginTransactionRequest) GetPreviousTransaction() *Transaction {
- if m != nil {
- return m.PreviousTransaction
- }
- return nil
-}
-
-type CommitResponse struct {
- Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
- Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version,json=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CommitResponse) Reset() { *m = CommitResponse{} }
-func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
-func (*CommitResponse) ProtoMessage() {}
-func (*CommitResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37}
-}
-func (m *CommitResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CommitResponse.Unmarshal(m, b)
-}
-func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic)
-}
-func (dst *CommitResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CommitResponse.Merge(dst, src)
-}
-func (m *CommitResponse) XXX_Size() int {
- return xxx_messageInfo_CommitResponse.Size(m)
-}
-func (m *CommitResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_CommitResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CommitResponse proto.InternalMessageInfo
-
-func (m *CommitResponse) GetCost() *Cost {
- if m != nil {
- return m.Cost
- }
- return nil
-}
-
-func (m *CommitResponse) GetVersion() []*CommitResponse_Version {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-type CommitResponse_Version struct {
- RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key,json=rootEntityKey" json:"root_entity_key,omitempty"`
- Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} }
-func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) }
-func (*CommitResponse_Version) ProtoMessage() {}
-func (*CommitResponse_Version) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37, 0}
-}
-func (m *CommitResponse_Version) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CommitResponse_Version.Unmarshal(m, b)
-}
-func (m *CommitResponse_Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CommitResponse_Version.Marshal(b, m, deterministic)
-}
-func (dst *CommitResponse_Version) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CommitResponse_Version.Merge(dst, src)
-}
-func (m *CommitResponse_Version) XXX_Size() int {
- return xxx_messageInfo_CommitResponse_Version.Size(m)
-}
-func (m *CommitResponse_Version) XXX_DiscardUnknown() {
- xxx_messageInfo_CommitResponse_Version.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CommitResponse_Version proto.InternalMessageInfo
-
-func (m *CommitResponse_Version) GetRootEntityKey() *Reference {
- if m != nil {
- return m.RootEntityKey
- }
- return nil
-}
-
-func (m *CommitResponse_Version) GetVersion() int64 {
- if m != nil && m.Version != nil {
- return *m.Version
- }
- return 0
-}
-
-func init() {
- proto.RegisterType((*Action)(nil), "appengine.Action")
- proto.RegisterType((*PropertyValue)(nil), "appengine.PropertyValue")
- proto.RegisterType((*PropertyValue_PointValue)(nil), "appengine.PropertyValue.PointValue")
- proto.RegisterType((*PropertyValue_UserValue)(nil), "appengine.PropertyValue.UserValue")
- proto.RegisterType((*PropertyValue_ReferenceValue)(nil), "appengine.PropertyValue.ReferenceValue")
- proto.RegisterType((*PropertyValue_ReferenceValue_PathElement)(nil), "appengine.PropertyValue.ReferenceValue.PathElement")
- proto.RegisterType((*Property)(nil), "appengine.Property")
- proto.RegisterType((*Path)(nil), "appengine.Path")
- proto.RegisterType((*Path_Element)(nil), "appengine.Path.Element")
- proto.RegisterType((*Reference)(nil), "appengine.Reference")
- proto.RegisterType((*User)(nil), "appengine.User")
- proto.RegisterType((*EntityProto)(nil), "appengine.EntityProto")
- proto.RegisterType((*CompositeProperty)(nil), "appengine.CompositeProperty")
- proto.RegisterType((*Index)(nil), "appengine.Index")
- proto.RegisterType((*Index_Property)(nil), "appengine.Index.Property")
- proto.RegisterType((*CompositeIndex)(nil), "appengine.CompositeIndex")
- proto.RegisterType((*IndexPostfix)(nil), "appengine.IndexPostfix")
- proto.RegisterType((*IndexPostfix_IndexValue)(nil), "appengine.IndexPostfix.IndexValue")
- proto.RegisterType((*IndexPosition)(nil), "appengine.IndexPosition")
- proto.RegisterType((*Snapshot)(nil), "appengine.Snapshot")
- proto.RegisterType((*InternalHeader)(nil), "appengine.InternalHeader")
- proto.RegisterType((*Transaction)(nil), "appengine.Transaction")
- proto.RegisterType((*Query)(nil), "appengine.Query")
- proto.RegisterType((*Query_Filter)(nil), "appengine.Query.Filter")
- proto.RegisterType((*Query_Order)(nil), "appengine.Query.Order")
- proto.RegisterType((*CompiledQuery)(nil), "appengine.CompiledQuery")
- proto.RegisterType((*CompiledQuery_PrimaryScan)(nil), "appengine.CompiledQuery.PrimaryScan")
- proto.RegisterType((*CompiledQuery_MergeJoinScan)(nil), "appengine.CompiledQuery.MergeJoinScan")
- proto.RegisterType((*CompiledQuery_EntityFilter)(nil), "appengine.CompiledQuery.EntityFilter")
- proto.RegisterType((*CompiledCursor)(nil), "appengine.CompiledCursor")
- proto.RegisterType((*CompiledCursor_Position)(nil), "appengine.CompiledCursor.Position")
- proto.RegisterType((*CompiledCursor_Position_IndexValue)(nil), "appengine.CompiledCursor.Position.IndexValue")
- proto.RegisterType((*Cursor)(nil), "appengine.Cursor")
- proto.RegisterType((*Error)(nil), "appengine.Error")
- proto.RegisterType((*Cost)(nil), "appengine.Cost")
- proto.RegisterType((*Cost_CommitCost)(nil), "appengine.Cost.CommitCost")
- proto.RegisterType((*GetRequest)(nil), "appengine.GetRequest")
- proto.RegisterType((*GetResponse)(nil), "appengine.GetResponse")
- proto.RegisterType((*GetResponse_Entity)(nil), "appengine.GetResponse.Entity")
- proto.RegisterType((*PutRequest)(nil), "appengine.PutRequest")
- proto.RegisterType((*PutResponse)(nil), "appengine.PutResponse")
- proto.RegisterType((*TouchRequest)(nil), "appengine.TouchRequest")
- proto.RegisterType((*TouchResponse)(nil), "appengine.TouchResponse")
- proto.RegisterType((*DeleteRequest)(nil), "appengine.DeleteRequest")
- proto.RegisterType((*DeleteResponse)(nil), "appengine.DeleteResponse")
- proto.RegisterType((*NextRequest)(nil), "appengine.NextRequest")
- proto.RegisterType((*QueryResult)(nil), "appengine.QueryResult")
- proto.RegisterType((*AllocateIdsRequest)(nil), "appengine.AllocateIdsRequest")
- proto.RegisterType((*AllocateIdsResponse)(nil), "appengine.AllocateIdsResponse")
- proto.RegisterType((*CompositeIndices)(nil), "appengine.CompositeIndices")
- proto.RegisterType((*AddActionsRequest)(nil), "appengine.AddActionsRequest")
- proto.RegisterType((*AddActionsResponse)(nil), "appengine.AddActionsResponse")
- proto.RegisterType((*BeginTransactionRequest)(nil), "appengine.BeginTransactionRequest")
- proto.RegisterType((*CommitResponse)(nil), "appengine.CommitResponse")
- proto.RegisterType((*CommitResponse_Version)(nil), "appengine.CommitResponse.Version")
-}
-
-func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/datastore/datastore_v3.proto", fileDescriptor_datastore_v3_83b17b80c34f6179)
-}
-
-var fileDescriptor_datastore_v3_83b17b80c34f6179 = []byte{
- // 4156 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0xe3, 0x46,
- 0x76, 0x37, 0xc1, 0xef, 0x47, 0x89, 0x82, 0x5a, 0xf3, 0xc1, 0xa1, 0x3f, 0x46, 0xc6, 0xac, 0x6d,
- 0xd9, 0x6b, 0x73, 0x6c, 0xf9, 0x23, 0x5b, 0x4a, 0x76, 0x1d, 0x4a, 0xc4, 0x68, 0x90, 0xa1, 0x48,
- 0xb9, 0x09, 0xd9, 0x9e, 0x5c, 0x50, 0x18, 0xa2, 0x29, 0x21, 0x43, 0x02, 0x30, 0x00, 0x6a, 0x46,
- 0x93, 0xe4, 0x90, 0x4b, 0x2a, 0x55, 0x5b, 0xa9, 0x1c, 0x92, 0x4a, 0x25, 0xf9, 0x07, 0x72, 0xc8,
- 0x39, 0x95, 0xaa, 0x54, 0xf6, 0x98, 0x5b, 0x0e, 0x7b, 0xc9, 0x31, 0x95, 0x73, 0xf2, 0x27, 0x24,
- 0x39, 0xa4, 0xfa, 0x75, 0x03, 0x02, 0x28, 0x4a, 0x23, 0x6d, 0xf6, 0x90, 0x13, 0xd1, 0xef, 0xfd,
- 0xba, 0xf1, 0xfa, 0xf5, 0xfb, 0x6c, 0x10, 0xba, 0xc7, 0xbe, 0x7f, 0x3c, 0x65, 0x9d, 0x63, 0x7f,
- 0x6a, 0x7b, 0xc7, 0x1d, 0x3f, 0x3c, 0x7e, 0x68, 0x07, 0x01, 0xf3, 0x8e, 0x5d, 0x8f, 0x3d, 0x74,
- 0xbd, 0x98, 0x85, 0x9e, 0x3d, 0x7d, 0xe8, 0xd8, 0xb1, 0x1d, 0xc5, 0x7e, 0xc8, 0xce, 0x9f, 0xac,
- 0xd3, 0xcf, 0x3b, 0x41, 0xe8, 0xc7, 0x3e, 0xa9, 0xa7, 0x13, 0xb4, 0x1a, 0x54, 0xba, 0xe3, 0xd8,
- 0xf5, 0x3d, 0xed, 0x1f, 0x2b, 0xb0, 0x7a, 0x18, 0xfa, 0x01, 0x0b, 0xe3, 0xb3, 0x6f, 0xed, 0xe9,
- 0x9c, 0x91, 0x77, 0x00, 0x5c, 0x2f, 0xfe, 0xea, 0x0b, 0x1c, 0xb5, 0x0a, 0x9b, 0x85, 0xad, 0x22,
- 0xcd, 0x50, 0x88, 0x06, 0x2b, 0xcf, 0x7c, 0x7f, 0xca, 0x6c, 0x4f, 0x20, 0x94, 0xcd, 0xc2, 0x56,
- 0x8d, 0xe6, 0x68, 0x64, 0x13, 0x1a, 0x51, 0x1c, 0xba, 0xde, 0xb1, 0x80, 0x14, 0x37, 0x0b, 0x5b,
- 0x75, 0x9a, 0x25, 0x71, 0x84, 0xe3, 0xcf, 0x9f, 0x4d, 0x99, 0x40, 0x94, 0x36, 0x0b, 0x5b, 0x05,
- 0x9a, 0x25, 0x91, 0x3d, 0x80, 0xc0, 0x77, 0xbd, 0xf8, 0x14, 0x01, 0xe5, 0xcd, 0xc2, 0x16, 0x6c,
- 0x3f, 0xe8, 0xa4, 0x7b, 0xe8, 0xe4, 0xa4, 0xee, 0x1c, 0x72, 0x28, 0x3e, 0xd2, 0xcc, 0x34, 0xf2,
- 0xdb, 0x50, 0x9f, 0x47, 0x2c, 0x14, 0x6b, 0xd4, 0x70, 0x0d, 0xed, 0xd2, 0x35, 0x8e, 0x22, 0x16,
- 0x8a, 0x25, 0xce, 0x27, 0x91, 0x21, 0x34, 0x43, 0x36, 0x61, 0x21, 0xf3, 0xc6, 0x4c, 0x2c, 0xb3,
- 0x82, 0xcb, 0x7c, 0x70, 0xe9, 0x32, 0x34, 0x81, 0x8b, 0xb5, 0x16, 0xa6, 0xb7, 0xb7, 0x00, 0xce,
- 0x85, 0x25, 0x2b, 0x50, 0x78, 0xd9, 0xaa, 0x6c, 0x2a, 0x5b, 0x05, 0x5a, 0x78, 0xc9, 0x47, 0x67,
- 0xad, 0xaa, 0x18, 0x9d, 0xb5, 0xff, 0xa9, 0x00, 0xf5, 0x54, 0x26, 0x72, 0x0b, 0xca, 0x6c, 0x66,
- 0xbb, 0xd3, 0x56, 0x7d, 0x53, 0xd9, 0xaa, 0x53, 0x31, 0x20, 0xf7, 0xa1, 0x61, 0xcf, 0xe3, 0x13,
- 0xcb, 0xf1, 0x67, 0xb6, 0xeb, 0xb5, 0x00, 0x79, 0xc0, 0x49, 0x3d, 0xa4, 0x90, 0x36, 0xd4, 0x3c,
- 0x77, 0xfc, 0xdc, 0xb3, 0x67, 0xac, 0xd5, 0xc0, 0x73, 0x48, 0xc7, 0xe4, 0x13, 0x20, 0x13, 0xe6,
- 0xb0, 0xd0, 0x8e, 0x99, 0x63, 0xb9, 0x0e, 0xf3, 0x62, 0x37, 0x3e, 0x6b, 0xdd, 0x46, 0xd4, 0x7a,
- 0xca, 0x31, 0x24, 0x23, 0x0f, 0x0f, 0x42, 0xff, 0xd4, 0x75, 0x58, 0xd8, 0xba, 0xb3, 0x00, 0x3f,
- 0x94, 0x8c, 0xf6, 0xbf, 0x17, 0xa0, 0x99, 0xd7, 0x05, 0x51, 0xa1, 0x68, 0x07, 0x41, 0x6b, 0x15,
- 0xa5, 0xe4, 0x8f, 0xe4, 0x6d, 0x00, 0x2e, 0x8a, 0x15, 0x05, 0xf6, 0x98, 0xb5, 0x6e, 0xe1, 0x5a,
- 0x75, 0x4e, 0x19, 0x71, 0x02, 0x39, 0x82, 0x46, 0x60, 0xc7, 0x27, 0x6c, 0xca, 0x66, 0xcc, 0x8b,
- 0x5b, 0xcd, 0xcd, 0xe2, 0x16, 0x6c, 0x7f, 0x7e, 0x4d, 0xd5, 0x77, 0x0e, 0xed, 0xf8, 0x44, 0x17,
- 0x53, 0x69, 0x76, 0x9d, 0xb6, 0x0e, 0x8d, 0x0c, 0x8f, 0x10, 0x28, 0xc5, 0x67, 0x01, 0x6b, 0xad,
- 0xa1, 0x5c, 0xf8, 0x4c, 0x9a, 0xa0, 0xb8, 0x4e, 0x4b, 0x45, 0xf3, 0x57, 0x5c, 0x87, 0x63, 0x50,
- 0x87, 0xeb, 0x28, 0x22, 0x3e, 0x6b, 0xff, 0x51, 0x86, 0x5a, 0x22, 0x00, 0xe9, 0x42, 0x75, 0xc6,
- 0x6c, 0xcf, 0xf5, 0x8e, 0xd1, 0x69, 0x9a, 0xdb, 0x6f, 0x2e, 0x11, 0xb3, 0x73, 0x20, 0x20, 0x3b,
- 0x30, 0x18, 0x5a, 0x07, 0x7a, 0x77, 0x60, 0x0c, 0xf6, 0x69, 0x32, 0x8f, 0x1f, 0xa6, 0x7c, 0xb4,
- 0xe6, 0xa1, 0x8b, 0x9e, 0x55, 0xa7, 0x20, 0x49, 0x47, 0xa1, 0x9b, 0x0a, 0x51, 0x14, 0x82, 0xe2,
- 0x21, 0x76, 0xa0, 0x9c, 0xb8, 0x88, 0xb2, 0xd5, 0xd8, 0x6e, 0x5d, 0xa6, 0x1c, 0x2a, 0x60, 0xdc,
- 0x20, 0x66, 0xf3, 0x69, 0xec, 0x06, 0x53, 0xee, 0x76, 0xca, 0x56, 0x8d, 0xa6, 0x63, 0xf2, 0x1e,
- 0x40, 0xc4, 0xec, 0x70, 0x7c, 0x62, 0x3f, 0x9b, 0xb2, 0x56, 0x85, 0x7b, 0xf6, 0x4e, 0x79, 0x62,
- 0x4f, 0x23, 0x46, 0x33, 0x0c, 0x62, 0xc3, 0xdd, 0x49, 0x1c, 0x59, 0xb1, 0xff, 0x9c, 0x79, 0xee,
- 0x2b, 0x9b, 0x07, 0x12, 0xcb, 0x0f, 0xf8, 0x0f, 0xfa, 0x58, 0x73, 0xfb, 0xc3, 0x65, 0x5b, 0x7f,
- 0x14, 0x47, 0x66, 0x66, 0xc6, 0x10, 0x27, 0xd0, 0xdb, 0x93, 0x65, 0x64, 0xd2, 0x86, 0xca, 0xd4,
- 0x1f, 0xdb, 0x53, 0xd6, 0xaa, 0x73, 0x2d, 0xec, 0x28, 0xcc, 0xa3, 0x92, 0xa2, 0xfd, 0xb3, 0x02,
- 0x55, 0xa9, 0x47, 0xd2, 0x84, 0x8c, 0x26, 0xd5, 0x37, 0x48, 0x0d, 0x4a, 0xbb, 0xfd, 0xe1, 0xae,
- 0xda, 0xe4, 0x4f, 0xa6, 0xfe, 0xbd, 0xa9, 0xae, 0x71, 0xcc, 0xee, 0x53, 0x53, 0x1f, 0x99, 0x94,
- 0x63, 0x54, 0xb2, 0x0e, 0xab, 0x5d, 0x73, 0x78, 0x60, 0xed, 0x75, 0x4d, 0x7d, 0x7f, 0x48, 0x9f,
- 0xaa, 0x05, 0xb2, 0x0a, 0x75, 0x24, 0xf5, 0x8d, 0xc1, 0x13, 0x55, 0xe1, 0x33, 0x70, 0x68, 0x1a,
- 0x66, 0x5f, 0x57, 0x8b, 0x44, 0x85, 0x15, 0x31, 0x63, 0x38, 0x30, 0xf5, 0x81, 0xa9, 0x96, 0x52,
- 0xca, 0xe8, 0xe8, 0xe0, 0xa0, 0x4b, 0x9f, 0xaa, 0x65, 0xb2, 0x06, 0x0d, 0xa4, 0x74, 0x8f, 0xcc,
- 0xc7, 0x43, 0xaa, 0x56, 0x48, 0x03, 0xaa, 0xfb, 0x3d, 0xeb, 0xbb, 0xc7, 0xfa, 0x40, 0xad, 0x92,
- 0x15, 0xa8, 0xed, 0xf7, 0x2c, 0xfd, 0xa0, 0x6b, 0xf4, 0xd5, 0x1a, 0x9f, 0xbd, 0xaf, 0x0f, 0xe9,
- 0x68, 0x64, 0x1d, 0x0e, 0x8d, 0x81, 0xa9, 0xd6, 0x49, 0x1d, 0xca, 0xfb, 0x3d, 0xcb, 0x38, 0x50,
- 0x81, 0x10, 0x68, 0xee, 0xf7, 0xac, 0xc3, 0xc7, 0xc3, 0x81, 0x3e, 0x38, 0x3a, 0xd8, 0xd5, 0xa9,
- 0xda, 0x20, 0xb7, 0x40, 0xe5, 0xb4, 0xe1, 0xc8, 0xec, 0xf6, 0xbb, 0xbd, 0x1e, 0xd5, 0x47, 0x23,
- 0x75, 0x85, 0x4b, 0xbd, 0xdf, 0xb3, 0x68, 0xd7, 0xe4, 0xfb, 0x5a, 0xe5, 0x2f, 0xe4, 0x7b, 0x7f,
- 0xa2, 0x3f, 0x55, 0xd7, 0xf9, 0x2b, 0xf4, 0x81, 0x69, 0x98, 0x4f, 0xad, 0x43, 0x3a, 0x34, 0x87,
- 0xea, 0x06, 0x17, 0xd0, 0x18, 0xf4, 0xf4, 0xef, 0xad, 0x6f, 0xbb, 0xfd, 0x23, 0x5d, 0x25, 0xda,
- 0x8f, 0xe1, 0xf6, 0xd2, 0x33, 0xe1, 0xaa, 0x7b, 0x6c, 0x1e, 0xf4, 0xd5, 0x02, 0x7f, 0xe2, 0x9b,
- 0x52, 0x15, 0xed, 0x0f, 0xa0, 0xc4, 0x5d, 0x86, 0x7c, 0x06, 0xd5, 0xc4, 0x1b, 0x0b, 0xe8, 0x8d,
- 0x77, 0xb3, 0x67, 0x6d, 0xc7, 0x27, 0x9d, 0xc4, 0xe3, 0x12, 0x5c, 0xbb, 0x0b, 0xd5, 0x45, 0x4f,
- 0x53, 0x2e, 0x78, 0x5a, 0xf1, 0x82, 0xa7, 0x95, 0x32, 0x9e, 0x66, 0x43, 0x3d, 0xf5, 0xed, 0x9b,
- 0x47, 0x91, 0x07, 0x50, 0xe2, 0xde, 0xdf, 0x6a, 0xa2, 0x87, 0xac, 0x2d, 0x08, 0x4c, 0x91, 0xa9,
- 0xfd, 0x43, 0x01, 0x4a, 0x3c, 0xda, 0x9e, 0x07, 0xda, 0xc2, 0x15, 0x81, 0x56, 0xb9, 0x32, 0xd0,
- 0x16, 0xaf, 0x15, 0x68, 0x2b, 0x37, 0x0b, 0xb4, 0xd5, 0x4b, 0x02, 0xad, 0xf6, 0x67, 0x45, 0x68,
- 0xe8, 0x38, 0xf3, 0x10, 0x13, 0xfd, 0xfb, 0x50, 0x7c, 0xce, 0xce, 0x50, 0x3f, 0x8d, 0xed, 0x5b,
- 0x99, 0xdd, 0xa6, 0x2a, 0xa4, 0x1c, 0x40, 0xb6, 0x61, 0x45, 0xbc, 0xd0, 0x3a, 0x0e, 0xfd, 0x79,
- 0xd0, 0x52, 0x97, 0xab, 0xa7, 0x21, 0x40, 0xfb, 0x1c, 0x43, 0xde, 0x83, 0xb2, 0xff, 0xc2, 0x63,
- 0x21, 0xc6, 0xc1, 0x3c, 0x98, 0x2b, 0x8f, 0x0a, 0x2e, 0x79, 0x08, 0xa5, 0xe7, 0xae, 0xe7, 0xe0,
- 0x19, 0xe6, 0x23, 0x61, 0x46, 0xd0, 0xce, 0x13, 0xd7, 0x73, 0x28, 0x02, 0xc9, 0x3d, 0xa8, 0xf1,
- 0x5f, 0x8c, 0x7b, 0x65, 0xdc, 0x68, 0x95, 0x8f, 0x79, 0xd0, 0x7b, 0x08, 0xb5, 0x40, 0xc6, 0x10,
- 0x4c, 0x00, 0x8d, 0xed, 0x8d, 0x25, 0xe1, 0x85, 0xa6, 0x20, 0xf2, 0x15, 0xac, 0x84, 0xf6, 0x0b,
- 0x2b, 0x9d, 0xb4, 0x76, 0xf9, 0xa4, 0x46, 0x68, 0xbf, 0x48, 0x23, 0x38, 0x81, 0x52, 0x68, 0x7b,
- 0xcf, 0x5b, 0x64, 0xb3, 0xb0, 0x55, 0xa6, 0xf8, 0xac, 0x7d, 0x01, 0x25, 0x2e, 0x25, 0x8f, 0x08,
- 0xfb, 0x3d, 0xf4, 0xff, 0xee, 0x9e, 0xa9, 0x16, 0x12, 0x7f, 0xfe, 0x96, 0x47, 0x03, 0x45, 0x72,
- 0x0f, 0xf4, 0xd1, 0xa8, 0xbb, 0xaf, 0xab, 0x45, 0xad, 0x07, 0xeb, 0x7b, 0xfe, 0x2c, 0xf0, 0x23,
- 0x37, 0x66, 0xe9, 0xf2, 0xf7, 0xa0, 0xe6, 0x7a, 0x0e, 0x7b, 0x69, 0xb9, 0x0e, 0x9a, 0x56, 0x91,
- 0x56, 0x71, 0x6c, 0x38, 0xdc, 0xe4, 0x4e, 0x65, 0x31, 0x55, 0xe4, 0x26, 0x87, 0x03, 0xed, 0x2f,
- 0x15, 0x28, 0x1b, 0x1c, 0xc1, 0x8d, 0x4f, 0x9e, 0x14, 0x7a, 0x8f, 0x30, 0x4c, 0x10, 0x24, 0x93,
- 0xfb, 0x50, 0x1b, 0x6a, 0xb6, 0x37, 0x66, 0xbc, 0xe2, 0xc3, 0x3c, 0x50, 0xa3, 0xe9, 0x98, 0x7c,
- 0x99, 0xd1, 0x9f, 0x82, 0x2e, 0x7b, 0x2f, 0xa3, 0x0a, 0x7c, 0xc1, 0x12, 0x2d, 0xb6, 0xff, 0xaa,
- 0x90, 0x49, 0x6e, 0xcb, 0x12, 0x4f, 0x1f, 0xea, 0x8e, 0x1b, 0x32, 0xac, 0x23, 0xe5, 0x41, 0x3f,
- 0xb8, 0x74, 0xe1, 0x4e, 0x2f, 0x81, 0xee, 0xd4, 0xbb, 0xa3, 0x3d, 0x7d, 0xd0, 0xe3, 0x99, 0xef,
- 0x7c, 0x01, 0xed, 0x23, 0xa8, 0xa7, 0x10, 0x0c, 0xc7, 0x09, 0x48, 0x2d, 0x70, 0xf5, 0xf6, 0xf4,
- 0x74, 0xac, 0x68, 0x7f, 0xad, 0x40, 0x33, 0xd5, 0xaf, 0xd0, 0xd0, 0x6d, 0xa8, 0xd8, 0x41, 0x90,
- 0xa8, 0xb6, 0x4e, 0xcb, 0x76, 0x10, 0x18, 0x8e, 0x8c, 0x2d, 0x0a, 0x6a, 0x9b, 0xc7, 0x96, 0x4f,
- 0x01, 0x1c, 0x36, 0x71, 0x3d, 0x17, 0x85, 0x2e, 0xa2, 0xc1, 0xab, 0x8b, 0x42, 0xd3, 0x0c, 0x86,
- 0x7c, 0x09, 0xe5, 0x28, 0xb6, 0x63, 0x91, 0x2b, 0x9b, 0xdb, 0xf7, 0x33, 0xe0, 0xbc, 0x08, 0x9d,
- 0x11, 0x87, 0x51, 0x81, 0x26, 0x5f, 0xc1, 0x2d, 0xdf, 0x9b, 0x9e, 0x59, 0xf3, 0x88, 0x59, 0xee,
- 0xc4, 0x0a, 0xd9, 0x0f, 0x73, 0x37, 0x64, 0x4e, 0x3e, 0xa7, 0xae, 0x73, 0xc8, 0x51, 0xc4, 0x8c,
- 0x09, 0x95, 0x7c, 0xed, 0x6b, 0x28, 0xe3, 0x3a, 0x7c, 0xcf, 0xdf, 0x51, 0xc3, 0xd4, 0xad, 0xe1,
- 0xa0, 0xff, 0x54, 0xe8, 0x80, 0xea, 0xdd, 0x9e, 0x85, 0x44, 0x55, 0xe1, 0xc1, 0xbe, 0xa7, 0xf7,
- 0x75, 0x53, 0xef, 0xa9, 0x45, 0x9e, 0x3d, 0x74, 0x4a, 0x87, 0x54, 0x2d, 0x69, 0xff, 0x53, 0x80,
- 0x15, 0x94, 0xe7, 0xd0, 0x8f, 0xe2, 0x89, 0xfb, 0x92, 0xec, 0x41, 0x43, 0x98, 0xdd, 0xa9, 0x2c,
- 0xe8, 0xb9, 0x33, 0x68, 0x8b, 0x7b, 0x96, 0x68, 0x31, 0x90, 0x75, 0xb4, 0x9b, 0x3e, 0x27, 0x21,
- 0x45, 0x41, 0xa7, 0xbf, 0x22, 0xa4, 0xbc, 0x05, 0x95, 0x67, 0x6c, 0xe2, 0x87, 0x22, 0x04, 0xd6,
- 0x76, 0x4a, 0x71, 0x38, 0x67, 0x54, 0xd2, 0xda, 0x36, 0xc0, 0xf9, 0xfa, 0xe4, 0x01, 0xac, 0x26,
- 0xc6, 0x66, 0xa1, 0x71, 0x89, 0x93, 0x5b, 0x49, 0x88, 0x83, 0x5c, 0x75, 0xa3, 0x5c, 0xab, 0xba,
- 0xd1, 0xbe, 0x86, 0xd5, 0x64, 0x3f, 0xe2, 0xfc, 0x54, 0x21, 0x79, 0x01, 0x63, 0xca, 0x82, 0x8c,
- 0xca, 0x45, 0x19, 0xb5, 0x9f, 0x41, 0x6d, 0xe4, 0xd9, 0x41, 0x74, 0xe2, 0xc7, 0xdc, 0x7a, 0xe2,
- 0x48, 0xfa, 0xaa, 0x12, 0x47, 0x9a, 0x06, 0x15, 0x7e, 0x38, 0xf3, 0x88, 0xbb, 0xbf, 0x31, 0xe8,
- 0xee, 0x99, 0xc6, 0xb7, 0xba, 0xfa, 0x06, 0x01, 0xa8, 0xc8, 0xe7, 0x82, 0xa6, 0x41, 0xd3, 0x90,
- 0xed, 0xd8, 0x63, 0x66, 0x3b, 0x2c, 0xe4, 0x12, 0xfc, 0xe0, 0x47, 0x89, 0x04, 0x3f, 0xf8, 0x91,
- 0xf6, 0x17, 0x05, 0x68, 0x98, 0xa1, 0xed, 0x45, 0xb6, 0x30, 0xf7, 0xcf, 0xa0, 0x72, 0x82, 0x58,
- 0x74, 0xa3, 0xc6, 0x82, 0x7f, 0x66, 0x17, 0xa3, 0x12, 0x48, 0xee, 0x40, 0xe5, 0xc4, 0xf6, 0x9c,
- 0xa9, 0xd0, 0x5a, 0x85, 0xca, 0x51, 0x92, 0x1b, 0x95, 0xf3, 0xdc, 0xb8, 0x05, 0x2b, 0x33, 0x3b,
- 0x7c, 0x6e, 0x8d, 0x4f, 0x6c, 0xef, 0x98, 0x45, 0xf2, 0x60, 0xa4, 0x05, 0x36, 0x38, 0x6b, 0x4f,
- 0x70, 0xb4, 0xbf, 0x5f, 0x81, 0xf2, 0x37, 0x73, 0x16, 0x9e, 0x65, 0x04, 0xfa, 0xe0, 0xba, 0x02,
- 0xc9, 0x17, 0x17, 0x2e, 0x4b, 0xca, 0x6f, 0x2f, 0x26, 0x65, 0x22, 0x53, 0x84, 0xc8, 0x95, 0x22,
- 0x0b, 0x7c, 0x9a, 0x09, 0x63, 0xeb, 0x57, 0xd8, 0xda, 0x79, 0x70, 0x7b, 0x08, 0x95, 0x89, 0x3b,
- 0x8d, 0x51, 0x75, 0x8b, 0xd5, 0x08, 0xee, 0xa5, 0xf3, 0x08, 0xd9, 0x54, 0xc2, 0xc8, 0xbb, 0xb0,
- 0x22, 0x2a, 0x59, 0xeb, 0x07, 0xce, 0xc6, 0x82, 0x95, 0xf7, 0xa6, 0x48, 0x13, 0xbb, 0xff, 0x18,
- 0xca, 0x7e, 0xc8, 0x37, 0x5f, 0xc7, 0x25, 0xef, 0x5c, 0x58, 0x72, 0xc8, 0xb9, 0x54, 0x80, 0xc8,
- 0x87, 0x50, 0x3a, 0x71, 0xbd, 0x18, 0xb3, 0x46, 0x73, 0xfb, 0xf6, 0x05, 0xf0, 0x63, 0xd7, 0x8b,
- 0x29, 0x42, 0x78, 0x98, 0x1f, 0xfb, 0x73, 0x2f, 0x6e, 0xdd, 0xc5, 0x0c, 0x23, 0x06, 0xe4, 0x1e,
- 0x54, 0xfc, 0xc9, 0x24, 0x62, 0x31, 0x76, 0x96, 0xe5, 0x9d, 0xc2, 0xa7, 0x54, 0x12, 0xf8, 0x84,
- 0xa9, 0x3b, 0x73, 0x63, 0xec, 0x43, 0xca, 0x54, 0x0c, 0xc8, 0x2e, 0xac, 0x8d, 0xfd, 0x59, 0xe0,
- 0x4e, 0x99, 0x63, 0x8d, 0xe7, 0x61, 0xe4, 0x87, 0xad, 0x77, 0x2e, 0x1c, 0xd3, 0x9e, 0x44, 0xec,
- 0x21, 0x80, 0x36, 0xc7, 0xb9, 0x31, 0x31, 0x60, 0x83, 0x79, 0x8e, 0xb5, 0xb8, 0xce, 0xfd, 0xd7,
- 0xad, 0xb3, 0xce, 0x3c, 0x27, 0x4f, 0x4a, 0xc4, 0xc1, 0x48, 0x68, 0x61, 0xcc, 0x68, 0x6d, 0x60,
- 0x90, 0xb9, 0x77, 0x69, 0xac, 0x14, 0xe2, 0x64, 0xc2, 0xf7, 0x6f, 0xc0, 0x2d, 0x19, 0x22, 0xad,
- 0x80, 0x85, 0x13, 0x36, 0x8e, 0xad, 0x60, 0x6a, 0x7b, 0x58, 0xca, 0xa5, 0xc6, 0x4a, 0x24, 0xe4,
- 0x50, 0x20, 0x0e, 0xa7, 0xb6, 0x47, 0x34, 0xa8, 0x3f, 0x67, 0x67, 0x91, 0xc5, 0x23, 0x29, 0x76,
- 0xae, 0x29, 0xba, 0xc6, 0xe9, 0x43, 0x6f, 0x7a, 0x46, 0x7e, 0x02, 0x8d, 0xf8, 0xdc, 0xdb, 0xb0,
- 0x61, 0x6d, 0xe4, 0x4e, 0x35, 0xe3, 0x8b, 0x34, 0x0b, 0x25, 0xf7, 0xa1, 0x2a, 0x35, 0xd4, 0xba,
- 0x97, 0x5d, 0x3b, 0xa1, 0xf2, 0xc4, 0x3c, 0xb1, 0xdd, 0xa9, 0x7f, 0xca, 0x42, 0x6b, 0x16, 0xb5,
- 0xda, 0xe2, 0xb6, 0x24, 0x21, 0x1d, 0x44, 0xdc, 0x4f, 0xa3, 0x38, 0xf4, 0xbd, 0xe3, 0xd6, 0x26,
- 0xde, 0x93, 0xc8, 0xd1, 0xc5, 0xe0, 0xf7, 0x2e, 0x66, 0xfe, 0x7c, 0xf0, 0xfb, 0x1c, 0xee, 0x60,
- 0x65, 0x66, 0x3d, 0x3b, 0xb3, 0xf2, 0x68, 0x0d, 0xd1, 0x1b, 0xc8, 0xdd, 0x3d, 0x3b, 0xcc, 0x4e,
- 0x6a, 0x43, 0xcd, 0x71, 0xa3, 0xd8, 0xf5, 0xc6, 0x71, 0xab, 0x85, 0xef, 0x4c, 0xc7, 0xe4, 0x33,
- 0xb8, 0x3d, 0x73, 0x3d, 0x2b, 0xb2, 0x27, 0xcc, 0x8a, 0x5d, 0xee, 0x9b, 0x6c, 0xec, 0x7b, 0x4e,
- 0xd4, 0x7a, 0x80, 0x82, 0x93, 0x99, 0xeb, 0x8d, 0xec, 0x09, 0x33, 0xdd, 0x19, 0x1b, 0x09, 0x0e,
- 0xf9, 0x08, 0xd6, 0x11, 0x1e, 0xb2, 0x60, 0xea, 0x8e, 0x6d, 0xf1, 0xfa, 0x1f, 0xe1, 0xeb, 0xd7,
- 0x38, 0x83, 0x0a, 0x3a, 0xbe, 0xfa, 0x63, 0x68, 0x06, 0x2c, 0x8c, 0xdc, 0x28, 0xb6, 0xa4, 0x45,
- 0xbf, 0x97, 0xd5, 0xda, 0xaa, 0x64, 0x0e, 0x91, 0xd7, 0xfe, 0xcf, 0x02, 0x54, 0x84, 0x73, 0x92,
- 0x4f, 0x41, 0xf1, 0x03, 0xbc, 0x06, 0x69, 0x6e, 0x6f, 0x5e, 0xe2, 0xc1, 0x9d, 0x61, 0xc0, 0xeb,
- 0x5e, 0x3f, 0xa4, 0x8a, 0x1f, 0xdc, 0xb8, 0x28, 0xd4, 0xfe, 0x10, 0x6a, 0xc9, 0x02, 0xbc, 0xbc,
- 0xe8, 0xeb, 0xa3, 0x91, 0x65, 0x3e, 0xee, 0x0e, 0xd4, 0x02, 0xb9, 0x03, 0x24, 0x1d, 0x5a, 0x43,
- 0x6a, 0xe9, 0xdf, 0x1c, 0x75, 0xfb, 0xaa, 0x82, 0x5d, 0x1a, 0xd5, 0xbb, 0xa6, 0x4e, 0x05, 0xb2,
- 0x48, 0xee, 0xc1, 0xed, 0x2c, 0xe5, 0x1c, 0x5c, 0xc2, 0x14, 0x8c, 0x8f, 0x65, 0x52, 0x01, 0xc5,
- 0x18, 0xa8, 0x15, 0x9e, 0x16, 0xf4, 0xef, 0x8d, 0x91, 0x39, 0x52, 0xab, 0xed, 0xbf, 0x29, 0x40,
- 0x19, 0xc3, 0x06, 0x3f, 0x9f, 0x54, 0x72, 0x71, 0x5d, 0x73, 0x5e, 0xb9, 0x1a, 0xd9, 0x92, 0xaa,
- 0x81, 0x01, 0x65, 0x73, 0x79, 0xf4, 0xf9, 0xb5, 0xd6, 0x53, 0x3f, 0x85, 0x12, 0x8f, 0x52, 0xbc,
- 0x43, 0x1c, 0xd2, 0x9e, 0x4e, 0xad, 0x47, 0x06, 0x1d, 0xf1, 0x2a, 0x97, 0x40, 0xb3, 0x3b, 0xd8,
- 0xd3, 0x47, 0xe6, 0x30, 0xa1, 0xa1, 0x56, 0x1e, 0x19, 0x7d, 0x33, 0x45, 0x15, 0xb5, 0x9f, 0xd7,
- 0x60, 0x35, 0x89, 0x09, 0x22, 0x82, 0x3e, 0x82, 0x46, 0x10, 0xba, 0x33, 0x3b, 0x3c, 0x8b, 0xc6,
- 0xb6, 0x87, 0x49, 0x01, 0xb6, 0x7f, 0xb4, 0x24, 0xaa, 0x88, 0x1d, 0x1d, 0x0a, 0xec, 0x68, 0x6c,
- 0x7b, 0x34, 0x3b, 0x91, 0xf4, 0x61, 0x75, 0xc6, 0xc2, 0x63, 0xf6, 0x7b, 0xbe, 0xeb, 0xe1, 0x4a,
- 0x55, 0x8c, 0xc8, 0xef, 0x5f, 0xba, 0xd2, 0x01, 0x47, 0xff, 0x8e, 0xef, 0x7a, 0xb8, 0x56, 0x7e,
- 0x32, 0xf9, 0x04, 0xea, 0xa2, 0x12, 0x72, 0xd8, 0x04, 0x63, 0xc5, 0xb2, 0xda, 0x4f, 0xd4, 0xe8,
- 0x3d, 0x36, 0xc9, 0xc4, 0x65, 0xb8, 0x34, 0x2e, 0x37, 0xb2, 0x71, 0xf9, 0xcd, 0x6c, 0x2c, 0x5a,
- 0x11, 0x55, 0x78, 0x1a, 0x84, 0x2e, 0x38, 0x7c, 0x6b, 0x89, 0xc3, 0x77, 0x60, 0x23, 0xf1, 0x55,
- 0xcb, 0xf5, 0x26, 0xee, 0x4b, 0x2b, 0x72, 0x5f, 0x89, 0xd8, 0x53, 0xa6, 0xeb, 0x09, 0xcb, 0xe0,
- 0x9c, 0x91, 0xfb, 0x8a, 0x11, 0x23, 0xe9, 0xe0, 0x64, 0x0e, 0x5c, 0xc5, 0xab, 0xc9, 0xf7, 0x2e,
- 0x55, 0x8f, 0x68, 0xbe, 0x64, 0x46, 0xcc, 0x4d, 0x6d, 0xff, 0x52, 0x81, 0x46, 0xe6, 0x1c, 0x78,
- 0xf6, 0x16, 0xca, 0x42, 0x61, 0xc5, 0x55, 0x94, 0x50, 0x1f, 0x4a, 0xfa, 0x26, 0xd4, 0xa3, 0xd8,
- 0x0e, 0x63, 0x8b, 0x17, 0x57, 0xb2, 0xdd, 0x45, 0xc2, 0x13, 0x76, 0x46, 0x3e, 0x80, 0x35, 0xc1,
- 0x74, 0xbd, 0xf1, 0x74, 0x1e, 0xb9, 0xa7, 0xa2, 0x99, 0xaf, 0xd1, 0x26, 0x92, 0x8d, 0x84, 0x4a,
- 0xee, 0x42, 0x95, 0x67, 0x21, 0xbe, 0x86, 0x68, 0xfa, 0x2a, 0xcc, 0x73, 0xf8, 0x0a, 0x0f, 0x60,
- 0x95, 0x33, 0xce, 0xe7, 0x57, 0xc4, 0x2d, 0x33, 0xf3, 0x9c, 0xf3, 0xd9, 0x1d, 0xd8, 0x10, 0xaf,
- 0x09, 0x44, 0xf1, 0x2a, 0x2b, 0xdc, 0x3b, 0xa8, 0xd8, 0x75, 0x64, 0xc9, 0xb2, 0x56, 0x14, 0x9c,
- 0x1f, 0x01, 0xcf, 0x5e, 0x0b, 0xe8, 0xbb, 0x22, 0x94, 0x31, 0xcf, 0xc9, 0x61, 0x77, 0xe1, 0x1d,
- 0x8e, 0x9d, 0x7b, 0x76, 0x10, 0x4c, 0x5d, 0xe6, 0x58, 0x53, 0xff, 0x18, 0x43, 0x66, 0x14, 0xdb,
- 0xb3, 0xc0, 0x9a, 0x47, 0xad, 0x0d, 0x0c, 0x99, 0x6d, 0xe6, 0x39, 0x47, 0x09, 0xa8, 0xef, 0x1f,
- 0x9b, 0x09, 0xe4, 0x28, 0x6a, 0xff, 0x3e, 0xac, 0xe6, 0xec, 0x71, 0x41, 0xa7, 0x35, 0x74, 0xfe,
- 0x8c, 0x4e, 0xdf, 0x85, 0x95, 0x20, 0x64, 0xe7, 0xa2, 0xd5, 0x51, 0xb4, 0x86, 0xa0, 0x09, 0xb1,
- 0xb6, 0x60, 0x05, 0x79, 0x96, 0x20, 0xe6, 0xf3, 0x63, 0x03, 0x59, 0x87, 0xc8, 0x69, 0xbf, 0x80,
- 0x95, 0xec, 0x69, 0x93, 0x77, 0x33, 0x69, 0xa1, 0x99, 0xcb, 0x93, 0x69, 0x76, 0x48, 0x2a, 0xb2,
- 0xf5, 0x4b, 0x2a, 0x32, 0x72, 0x9d, 0x8a, 0x4c, 0xfb, 0x2f, 0xd9, 0x9c, 0x65, 0x2a, 0x84, 0x9f,
- 0x41, 0x2d, 0x90, 0xf5, 0x38, 0x5a, 0x52, 0xfe, 0x12, 0x3e, 0x0f, 0xee, 0x24, 0x95, 0x3b, 0x4d,
- 0xe7, 0xb4, 0xff, 0x56, 0x81, 0x5a, 0x5a, 0xd0, 0xe7, 0x2c, 0xef, 0xcd, 0x05, 0xcb, 0x3b, 0x90,
- 0x1a, 0x16, 0x0a, 0x7c, 0x1b, 0xa3, 0xc5, 0x27, 0xaf, 0x7f, 0xd7, 0xc5, 0xb6, 0xe7, 0x34, 0xdb,
- 0xf6, 0x6c, 0xbe, 0xae, 0xed, 0xf9, 0xe4, 0xa2, 0xc1, 0xbf, 0x95, 0xe9, 0x2d, 0x16, 0xcc, 0xbe,
- 0xfd, 0x7d, 0xae, 0x0f, 0xca, 0x26, 0x84, 0x77, 0xc4, 0x7e, 0xd2, 0x84, 0x90, 0xb6, 0x3f, 0xf7,
- 0xaf, 0xd7, 0xfe, 0x6c, 0x43, 0x45, 0xea, 0xfc, 0x0e, 0x54, 0x64, 0x4d, 0x27, 0x1b, 0x04, 0x31,
- 0x3a, 0x6f, 0x10, 0x0a, 0xb2, 0x4e, 0xd7, 0x7e, 0xae, 0x40, 0x59, 0x0f, 0x43, 0x3f, 0xd4, 0xfe,
- 0x48, 0x81, 0x3a, 0x3e, 0xed, 0xf9, 0x0e, 0xe3, 0xd9, 0x60, 0xb7, 0xdb, 0xb3, 0xa8, 0xfe, 0xcd,
- 0x91, 0x8e, 0xd9, 0xa0, 0x0d, 0x77, 0xf6, 0x86, 0x83, 0xbd, 0x23, 0x4a, 0xf5, 0x81, 0x69, 0x99,
- 0xb4, 0x3b, 0x18, 0xf1, 0xb6, 0x67, 0x38, 0x50, 0x15, 0x9e, 0x29, 0x8c, 0x81, 0xa9, 0xd3, 0x41,
- 0xb7, 0x6f, 0x89, 0x56, 0xb4, 0x88, 0x77, 0xb3, 0xba, 0xde, 0xb3, 0xf0, 0xd6, 0x51, 0x2d, 0xf1,
- 0x96, 0xd5, 0x34, 0x0e, 0xf4, 0xe1, 0x91, 0xa9, 0x96, 0xc9, 0x6d, 0x58, 0x3f, 0xd4, 0xe9, 0x81,
- 0x31, 0x1a, 0x19, 0xc3, 0x81, 0xd5, 0xd3, 0x07, 0x86, 0xde, 0x53, 0x2b, 0x7c, 0x9d, 0x5d, 0x63,
- 0xdf, 0xec, 0xee, 0xf6, 0x75, 0xb9, 0x4e, 0x95, 0x6c, 0xc2, 0x5b, 0x7b, 0xc3, 0x83, 0x03, 0xc3,
- 0x34, 0xf5, 0x9e, 0xb5, 0x7b, 0x64, 0x5a, 0x23, 0xd3, 0xe8, 0xf7, 0xad, 0xee, 0xe1, 0x61, 0xff,
- 0x29, 0x4f, 0x60, 0x35, 0x72, 0x17, 0x36, 0xf6, 0xba, 0x87, 0xdd, 0x5d, 0xa3, 0x6f, 0x98, 0x4f,
- 0xad, 0x9e, 0x31, 0xe2, 0xf3, 0x7b, 0x6a, 0x9d, 0x27, 0x6c, 0x93, 0x3e, 0xb5, 0xba, 0x7d, 0x14,
- 0xcd, 0xd4, 0xad, 0xdd, 0xee, 0xde, 0x13, 0x7d, 0xd0, 0x53, 0x81, 0x0b, 0x30, 0xea, 0x3e, 0xd2,
- 0x2d, 0x2e, 0x92, 0x65, 0x0e, 0x87, 0xd6, 0xb0, 0xdf, 0x53, 0x1b, 0xda, 0xbf, 0x14, 0xa1, 0xb4,
- 0xe7, 0x47, 0x31, 0xf7, 0x46, 0xe1, 0xac, 0x2f, 0x42, 0x37, 0x66, 0xa2, 0x7f, 0x2b, 0x53, 0xd1,
- 0x4b, 0x7f, 0x87, 0x24, 0x1e, 0x50, 0x32, 0x10, 0xeb, 0xd9, 0x19, 0xc7, 0x29, 0x88, 0x5b, 0x3b,
- 0xc7, 0xed, 0x72, 0xb2, 0x88, 0x68, 0x78, 0x85, 0x23, 0xd7, 0x2b, 0x22, 0x4e, 0x06, 0x61, 0xb9,
- 0xe0, 0xc7, 0x40, 0xb2, 0x20, 0xb9, 0x62, 0x09, 0x91, 0x6a, 0x06, 0x29, 0x96, 0xdc, 0x01, 0x18,
- 0xfb, 0xb3, 0x99, 0x1b, 0x8f, 0xfd, 0x28, 0x96, 0x5f, 0xc8, 0xda, 0x39, 0x63, 0x8f, 0x62, 0x6e,
- 0xf1, 0x33, 0x37, 0xe6, 0x8f, 0x34, 0x83, 0x26, 0x3b, 0x70, 0xcf, 0x0e, 0x82, 0xd0, 0x7f, 0xe9,
- 0xce, 0xec, 0x98, 0x59, 0xdc, 0x73, 0xed, 0x63, 0x66, 0x39, 0x6c, 0x1a, 0xdb, 0xd8, 0x13, 0x95,
- 0xe9, 0xdd, 0x0c, 0x60, 0x24, 0xf8, 0x3d, 0xce, 0xe6, 0x71, 0xd7, 0x75, 0xac, 0x88, 0xfd, 0x30,
- 0xe7, 0x1e, 0x60, 0xcd, 0x03, 0xc7, 0xe6, 0x62, 0xd6, 0x45, 0x96, 0x72, 0x9d, 0x91, 0xe4, 0x1c,
- 0x09, 0x46, 0xfb, 0x15, 0xc0, 0xb9, 0x14, 0x64, 0x1b, 0x6e, 0xf3, 0x3a, 0x9e, 0x45, 0x31, 0x73,
- 0x2c, 0xb9, 0xdb, 0x60, 0x1e, 0x47, 0x18, 0xe2, 0xcb, 0x74, 0x23, 0x65, 0xca, 0x9b, 0xc2, 0x79,
- 0x1c, 0x91, 0x9f, 0x40, 0xeb, 0xc2, 0x1c, 0x87, 0x4d, 0x19, 0x7f, 0x6d, 0x15, 0xa7, 0xdd, 0x59,
- 0x98, 0xd6, 0x13, 0x5c, 0xed, 0x4f, 0x14, 0x80, 0x7d, 0x16, 0x53, 0xc1, 0xcd, 0x34, 0xb6, 0x95,
- 0xeb, 0x36, 0xb6, 0xef, 0x27, 0x17, 0x08, 0xc5, 0xab, 0x63, 0xc0, 0x42, 0x97, 0xa1, 0xdc, 0xa4,
- 0xcb, 0xc8, 0x35, 0x11, 0xc5, 0x2b, 0x9a, 0x88, 0x52, 0xae, 0x89, 0xf8, 0x18, 0x9a, 0xf6, 0x74,
- 0xea, 0xbf, 0xe0, 0x05, 0x0d, 0x0b, 0x43, 0xe6, 0xa0, 0x11, 0x9c, 0xd7, 0xdb, 0xc8, 0xec, 0x49,
- 0x9e, 0xf6, 0xe7, 0x0a, 0x34, 0x50, 0x15, 0x51, 0xe0, 0x7b, 0x11, 0x23, 0x5f, 0x42, 0x45, 0x5e,
- 0x44, 0x8b, 0x8b, 0xfc, 0xb7, 0x33, 0xb2, 0x66, 0x70, 0xb2, 0x68, 0xa0, 0x12, 0xcc, 0x33, 0x42,
- 0xe6, 0x75, 0x97, 0x2b, 0x25, 0x45, 0x91, 0xfb, 0x50, 0x73, 0x3d, 0x4b, 0xb4, 0xd4, 0x95, 0x4c,
- 0x58, 0xac, 0xba, 0x1e, 0xd6, 0xb2, 0xed, 0x57, 0x50, 0x11, 0x2f, 0x21, 0x9d, 0x54, 0xa6, 0x8b,
- 0xfa, 0xcb, 0xdc, 0x1c, 0xa7, 0xc2, 0xc8, 0xc3, 0x29, 0xbd, 0x2e, 0x40, 0xb7, 0xa0, 0x7a, 0xca,
- 0x9b, 0x0f, 0xbc, 0xf4, 0xe3, 0xea, 0x4d, 0x86, 0xda, 0x1f, 0x97, 0x00, 0x0e, 0xe7, 0x4b, 0x0c,
- 0xa4, 0x71, 0x5d, 0x03, 0xe9, 0xe4, 0xf4, 0xf8, 0x7a, 0x99, 0x7f, 0x75, 0x43, 0x59, 0xd2, 0x69,
- 0x17, 0x6f, 0xda, 0x69, 0xdf, 0x87, 0x6a, 0x1c, 0xce, 0xb9, 0xa3, 0x08, 0x63, 0x4a, 0x5b, 0x5a,
- 0x49, 0x25, 0x6f, 0x42, 0x79, 0xe2, 0x87, 0x63, 0x86, 0x8e, 0x95, 0xb2, 0x05, 0xed, 0xc2, 0x65,
- 0x52, 0xed, 0xb2, 0xcb, 0x24, 0xde, 0xa0, 0x45, 0xf2, 0x1e, 0x0d, 0x0b, 0x99, 0x7c, 0x83, 0x96,
- 0x5c, 0xb1, 0xd1, 0x14, 0x44, 0xbe, 0x81, 0xa6, 0x3d, 0x8f, 0x7d, 0xcb, 0xe5, 0x15, 0xda, 0xd4,
- 0x1d, 0x9f, 0x61, 0xd9, 0xdd, 0xcc, 0x7f, 0xaf, 0x4f, 0x0f, 0xaa, 0xd3, 0x9d, 0xc7, 0xbe, 0xe1,
- 0x1c, 0x22, 0x72, 0xa7, 0x2a, 0x93, 0x12, 0x5d, 0xb1, 0x33, 0x64, 0xed, 0xc7, 0xb0, 0x92, 0x85,
- 0xf1, 0x04, 0x24, 0x81, 0xea, 0x1b, 0x3c, 0x3b, 0x8d, 0x78, 0x6a, 0x1b, 0x98, 0x46, 0xb7, 0xaf,
- 0x16, 0xb4, 0x18, 0x1a, 0xb8, 0xbc, 0xf4, 0x8e, 0xeb, 0xba, 0xfd, 0x03, 0x28, 0x61, 0xf8, 0x55,
- 0x2e, 0x7c, 0x0f, 0xc1, 0x98, 0x8b, 0xcc, 0xbc, 0xf9, 0x15, 0xb3, 0xe6, 0xf7, 0xdf, 0x05, 0x58,
- 0x31, 0xfd, 0xf9, 0xf8, 0xe4, 0xa2, 0x01, 0xc2, 0xaf, 0x3b, 0x42, 0x2d, 0x31, 0x1f, 0xe5, 0xa6,
- 0xe6, 0x93, 0x5a, 0x47, 0x71, 0x89, 0x75, 0xdc, 0xf4, 0xcc, 0xb5, 0x2f, 0x60, 0x55, 0x6e, 0x5e,
- 0x6a, 0x3d, 0xd1, 0x66, 0xe1, 0x0a, 0x6d, 0x6a, 0xbf, 0x50, 0x60, 0x55, 0xc4, 0xf7, 0xff, 0xbb,
- 0xd2, 0x2a, 0x37, 0x0c, 0xeb, 0xe5, 0x1b, 0x5d, 0x1e, 0xfd, 0xbf, 0xf4, 0x34, 0x6d, 0x08, 0xcd,
- 0x44, 0x7d, 0x37, 0x50, 0xfb, 0x15, 0x46, 0xfc, 0x8b, 0x02, 0x34, 0x06, 0xec, 0xe5, 0x92, 0x20,
- 0x5a, 0xbe, 0xee, 0x71, 0x7c, 0x98, 0x2b, 0x57, 0x1b, 0xdb, 0xeb, 0x59, 0x19, 0xc4, 0xd5, 0x63,
- 0x52, 0xc1, 0xa6, 0xb7, 0xa8, 0xca, 0xf2, 0x5b, 0xd4, 0xd2, 0x62, 0xb7, 0x9e, 0xb9, 0xc5, 0x2b,
- 0x2e, 0xbb, 0xc5, 0xd3, 0xfe, 0xad, 0x08, 0x0d, 0x6c, 0x90, 0x29, 0x8b, 0xe6, 0xd3, 0x38, 0x27,
- 0x4c, 0xe1, 0x6a, 0x61, 0x3a, 0x50, 0x09, 0x71, 0x92, 0x74, 0xa5, 0x4b, 0x83, 0xbf, 0x40, 0x61,
- 0x6b, 0xfc, 0xdc, 0x0d, 0x02, 0xe6, 0x58, 0x82, 0x92, 0x14, 0x30, 0x4d, 0x49, 0x16, 0x22, 0x44,
- 0xbc, 0xfc, 0x9c, 0xf9, 0x21, 0x4b, 0x51, 0x45, 0xbc, 0x4f, 0x68, 0x70, 0x5a, 0x02, 0xc9, 0xdd,
- 0x37, 0x88, 0xca, 0xe0, 0xfc, 0xbe, 0x21, 0xed, 0x35, 0x91, 0x5b, 0x47, 0xae, 0xe8, 0x35, 0x91,
- 0xcd, 0xbb, 0xa8, 0x99, 0x3d, 0x9d, 0x5a, 0x7e, 0x10, 0xa1, 0xd3, 0xd4, 0x68, 0x0d, 0x09, 0xc3,
- 0x20, 0x22, 0x5f, 0x43, 0x7a, 0x5d, 0x2c, 0x6f, 0xc9, 0xc5, 0x39, 0xb6, 0x2e, 0xbb, 0x58, 0xa0,
- 0xab, 0xe3, 0xdc, 0xfd, 0xcf, 0x92, 0x1b, 0xea, 0xca, 0x4d, 0x6f, 0xa8, 0x1f, 0x42, 0x59, 0xc4,
- 0xa8, 0xda, 0xeb, 0x62, 0x94, 0xc0, 0x65, 0xed, 0xb3, 0x91, 0xb7, 0xcf, 0x5f, 0x16, 0x80, 0x74,
- 0xa7, 0x53, 0x7f, 0x6c, 0xc7, 0xcc, 0x70, 0xa2, 0x8b, 0x66, 0x7a, 0xed, 0xcf, 0x2e, 0x9f, 0x41,
- 0x7d, 0xe6, 0x3b, 0x6c, 0x6a, 0x25, 0xdf, 0x94, 0x2e, 0xad, 0x7e, 0x10, 0xc6, 0x5b, 0x52, 0x02,
- 0x25, 0xbc, 0xc4, 0x51, 0xb0, 0xee, 0xc0, 0x67, 0xde, 0x84, 0xcd, 0xec, 0x97, 0xb2, 0x14, 0xe1,
- 0x8f, 0xa4, 0x03, 0xd5, 0x90, 0x45, 0x2c, 0x3c, 0x65, 0x57, 0x16, 0x55, 0x09, 0x48, 0x7b, 0x06,
- 0x1b, 0xb9, 0x1d, 0x49, 0x47, 0xbe, 0x85, 0x5f, 0x2b, 0xc3, 0x58, 0x7e, 0xb4, 0x12, 0x03, 0xfe,
- 0x3a, 0xe6, 0x25, 0x9f, 0x41, 0xf9, 0x63, 0xea, 0xf0, 0xc5, 0xab, 0xe2, 0xec, 0x1e, 0xa8, 0x59,
- 0x4d, 0xbb, 0x63, 0x0c, 0x36, 0xf2, 0x54, 0x0a, 0xd7, 0x3b, 0x15, 0xed, 0xef, 0x0a, 0xb0, 0xde,
- 0x75, 0x1c, 0xf1, 0x77, 0xc3, 0x25, 0xaa, 0x2f, 0x5e, 0x57, 0xf5, 0x0b, 0x81, 0x58, 0x84, 0x89,
- 0x6b, 0x05, 0xe2, 0x0f, 0xa1, 0x92, 0xd6, 0x5a, 0xc5, 0x05, 0x77, 0x16, 0x72, 0x51, 0x09, 0xd0,
- 0x6e, 0x01, 0xc9, 0x0a, 0x2b, 0xb4, 0xaa, 0xfd, 0x69, 0x11, 0xee, 0xee, 0xb2, 0x63, 0xd7, 0xcb,
- 0xbe, 0xe2, 0x57, 0xdf, 0xc9, 0xc5, 0x4f, 0x65, 0x9f, 0xc1, 0xba, 0x28, 0xe4, 0x93, 0x7f, 0x62,
- 0x59, 0xec, 0x58, 0x7e, 0x9d, 0x94, 0xb1, 0x6a, 0x0d, 0xf9, 0x07, 0x92, 0xad, 0xe3, 0x7f, 0xc5,
- 0x1c, 0x3b, 0xb6, 0x9f, 0xd9, 0x11, 0xb3, 0x5c, 0x47, 0xfe, 0x59, 0x06, 0x12, 0x92, 0xe1, 0x90,
- 0x21, 0x94, 0xb8, 0x0d, 0xa2, 0xeb, 0x36, 0xb7, 0xb7, 0x33, 0x62, 0x5d, 0xb2, 0x95, 0xac, 0x02,
- 0x0f, 0x7c, 0x87, 0xed, 0x54, 0x8f, 0x06, 0x4f, 0x06, 0xc3, 0xef, 0x06, 0x14, 0x17, 0x22, 0x06,
- 0xdc, 0x0a, 0x42, 0x76, 0xea, 0xfa, 0xf3, 0xc8, 0xca, 0x9e, 0x44, 0xf5, 0xca, 0x94, 0xb8, 0x91,
- 0xcc, 0xc9, 0x10, 0xb5, 0x9f, 0xc2, 0xda, 0xc2, 0xcb, 0x78, 0x6d, 0x26, 0x5f, 0xa7, 0xbe, 0x41,
- 0x56, 0xa1, 0x8e, 0x1f, 0xbb, 0x97, 0x7f, 0xfb, 0xd6, 0xfe, 0xb5, 0x80, 0x57, 0x4c, 0x33, 0x37,
- 0xbe, 0x59, 0x06, 0xfb, 0xcd, 0x7c, 0x06, 0x83, 0xed, 0x77, 0xf3, 0xe6, 0x9b, 0x59, 0xb0, 0xf3,
- 0xad, 0x00, 0xa6, 0x41, 0xa4, 0x6d, 0x43, 0x55, 0xd2, 0xc8, 0x6f, 0xc1, 0x5a, 0xe8, 0xfb, 0x71,
- 0xd2, 0x89, 0x8a, 0x0e, 0xe4, 0xf2, 0x3f, 0xdb, 0xac, 0x72, 0xb0, 0x48, 0x06, 0x4f, 0xf2, 0xbd,
- 0x48, 0x59, 0xfc, 0x0d, 0x44, 0x0e, 0x77, 0x1b, 0xbf, 0x5b, 0x4f, 0xff, 0xb7, 0xfb, 0xbf, 0x01,
- 0x00, 0x00, 0xff, 0xff, 0x35, 0x9f, 0x30, 0x98, 0xf2, 0x2b, 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
deleted file mode 100644
index 497b4d9a9..000000000
--- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
+++ /dev/null
@@ -1,551 +0,0 @@
-syntax = "proto2";
-option go_package = "datastore";
-
-package appengine;
-
-message Action{}
-
-message PropertyValue {
- optional int64 int64Value = 1;
- optional bool booleanValue = 2;
- optional string stringValue = 3;
- optional double doubleValue = 4;
-
- optional group PointValue = 5 {
- required double x = 6;
- required double y = 7;
- }
-
- optional group UserValue = 8 {
- required string email = 9;
- required string auth_domain = 10;
- optional string nickname = 11;
- optional string federated_identity = 21;
- optional string federated_provider = 22;
- }
-
- optional group ReferenceValue = 12 {
- required string app = 13;
- optional string name_space = 20;
- repeated group PathElement = 14 {
- required string type = 15;
- optional int64 id = 16;
- optional string name = 17;
- }
- }
-}
-
-message Property {
- enum Meaning {
- NO_MEANING = 0;
- BLOB = 14;
- TEXT = 15;
- BYTESTRING = 16;
-
- ATOM_CATEGORY = 1;
- ATOM_LINK = 2;
- ATOM_TITLE = 3;
- ATOM_CONTENT = 4;
- ATOM_SUMMARY = 5;
- ATOM_AUTHOR = 6;
-
- GD_WHEN = 7;
- GD_EMAIL = 8;
- GEORSS_POINT = 9;
- GD_IM = 10;
-
- GD_PHONENUMBER = 11;
- GD_POSTALADDRESS = 12;
-
- GD_RATING = 13;
-
- BLOBKEY = 17;
- ENTITY_PROTO = 19;
-
- INDEX_VALUE = 18;
- };
-
- optional Meaning meaning = 1 [default = NO_MEANING];
- optional string meaning_uri = 2;
-
- required string name = 3;
-
- required PropertyValue value = 5;
-
- required bool multiple = 4;
-
- optional bool searchable = 6 [default=false];
-
- enum FtsTokenizationOption {
- HTML = 1;
- ATOM = 2;
- }
-
- optional FtsTokenizationOption fts_tokenization_option = 8;
-
- optional string locale = 9 [default = "en"];
-}
-
-message Path {
- repeated group Element = 1 {
- required string type = 2;
- optional int64 id = 3;
- optional string name = 4;
- }
-}
-
-message Reference {
- required string app = 13;
- optional string name_space = 20;
- required Path path = 14;
-}
-
-message User {
- required string email = 1;
- required string auth_domain = 2;
- optional string nickname = 3;
- optional string federated_identity = 6;
- optional string federated_provider = 7;
-}
-
-message EntityProto {
- required Reference key = 13;
- required Path entity_group = 16;
- optional User owner = 17;
-
- enum Kind {
- GD_CONTACT = 1;
- GD_EVENT = 2;
- GD_MESSAGE = 3;
- }
- optional Kind kind = 4;
- optional string kind_uri = 5;
-
- repeated Property property = 14;
- repeated Property raw_property = 15;
-
- optional int32 rank = 18;
-}
-
-message CompositeProperty {
- required int64 index_id = 1;
- repeated string value = 2;
-}
-
-message Index {
- required string entity_type = 1;
- required bool ancestor = 5;
- repeated group Property = 2 {
- required string name = 3;
- enum Direction {
- ASCENDING = 1;
- DESCENDING = 2;
- }
- optional Direction direction = 4 [default = ASCENDING];
- }
-}
-
-message CompositeIndex {
- required string app_id = 1;
- required int64 id = 2;
- required Index definition = 3;
-
- enum State {
- WRITE_ONLY = 1;
- READ_WRITE = 2;
- DELETED = 3;
- ERROR = 4;
- }
- required State state = 4;
-
- optional bool only_use_if_required = 6 [default = false];
-}
-
-message IndexPostfix {
- message IndexValue {
- required string property_name = 1;
- required PropertyValue value = 2;
- }
-
- repeated IndexValue index_value = 1;
-
- optional Reference key = 2;
-
- optional bool before = 3 [default=true];
-}
-
-message IndexPosition {
- optional string key = 1;
-
- optional bool before = 2 [default=true];
-}
-
-message Snapshot {
- enum Status {
- INACTIVE = 0;
- ACTIVE = 1;
- }
-
- required int64 ts = 1;
-}
-
-message InternalHeader {
- optional string qos = 1;
-}
-
-message Transaction {
- optional InternalHeader header = 4;
- required fixed64 handle = 1;
- required string app = 2;
- optional bool mark_changes = 3 [default = false];
-}
-
-message Query {
- optional InternalHeader header = 39;
-
- required string app = 1;
- optional string name_space = 29;
-
- optional string kind = 3;
- optional Reference ancestor = 17;
-
- repeated group Filter = 4 {
- enum Operator {
- LESS_THAN = 1;
- LESS_THAN_OR_EQUAL = 2;
- GREATER_THAN = 3;
- GREATER_THAN_OR_EQUAL = 4;
- EQUAL = 5;
- IN = 6;
- EXISTS = 7;
- }
-
- required Operator op = 6;
- repeated Property property = 14;
- }
-
- optional string search_query = 8;
-
- repeated group Order = 9 {
- enum Direction {
- ASCENDING = 1;
- DESCENDING = 2;
- }
-
- required string property = 10;
- optional Direction direction = 11 [default = ASCENDING];
- }
-
- enum Hint {
- ORDER_FIRST = 1;
- ANCESTOR_FIRST = 2;
- FILTER_FIRST = 3;
- }
- optional Hint hint = 18;
-
- optional int32 count = 23;
-
- optional int32 offset = 12 [default = 0];
-
- optional int32 limit = 16;
-
- optional CompiledCursor compiled_cursor = 30;
- optional CompiledCursor end_compiled_cursor = 31;
-
- repeated CompositeIndex composite_index = 19;
-
- optional bool require_perfect_plan = 20 [default = false];
-
- optional bool keys_only = 21 [default = false];
-
- optional Transaction transaction = 22;
-
- optional bool compile = 25 [default = false];
-
- optional int64 failover_ms = 26;
-
- optional bool strong = 32;
-
- repeated string property_name = 33;
-
- repeated string group_by_property_name = 34;
-
- optional bool distinct = 24;
-
- optional int64 min_safe_time_seconds = 35;
-
- repeated string safe_replica_name = 36;
-
- optional bool persist_offset = 37 [default=false];
-}
-
-message CompiledQuery {
- required group PrimaryScan = 1 {
- optional string index_name = 2;
-
- optional string start_key = 3;
- optional bool start_inclusive = 4;
- optional string end_key = 5;
- optional bool end_inclusive = 6;
-
- repeated string start_postfix_value = 22;
- repeated string end_postfix_value = 23;
-
- optional int64 end_unapplied_log_timestamp_us = 19;
- }
-
- repeated group MergeJoinScan = 7 {
- required string index_name = 8;
-
- repeated string prefix_value = 9;
-
- optional bool value_prefix = 20 [default=false];
- }
-
- optional Index index_def = 21;
-
- optional int32 offset = 10 [default = 0];
-
- optional int32 limit = 11;
-
- required bool keys_only = 12;
-
- repeated string property_name = 24;
-
- optional int32 distinct_infix_size = 25;
-
- optional group EntityFilter = 13 {
- optional bool distinct = 14 [default=false];
-
- optional string kind = 17;
- optional Reference ancestor = 18;
- }
-}
-
-message CompiledCursor {
- optional group Position = 2 {
- optional string start_key = 27;
-
- repeated group IndexValue = 29 {
- optional string property = 30;
- required PropertyValue value = 31;
- }
-
- optional Reference key = 32;
-
- optional bool start_inclusive = 28 [default=true];
- }
-}
-
-message Cursor {
- required fixed64 cursor = 1;
-
- optional string app = 2;
-}
-
-message Error {
- enum ErrorCode {
- BAD_REQUEST = 1;
- CONCURRENT_TRANSACTION = 2;
- INTERNAL_ERROR = 3;
- NEED_INDEX = 4;
- TIMEOUT = 5;
- PERMISSION_DENIED = 6;
- BIGTABLE_ERROR = 7;
- COMMITTED_BUT_STILL_APPLYING = 8;
- CAPABILITY_DISABLED = 9;
- TRY_ALTERNATE_BACKEND = 10;
- SAFE_TIME_TOO_OLD = 11;
- }
-}
-
-message Cost {
- optional int32 index_writes = 1;
- optional int32 index_write_bytes = 2;
- optional int32 entity_writes = 3;
- optional int32 entity_write_bytes = 4;
- optional group CommitCost = 5 {
- optional int32 requested_entity_puts = 6;
- optional int32 requested_entity_deletes = 7;
- };
- optional int32 approximate_storage_delta = 8;
- optional int32 id_sequence_updates = 9;
-}
-
-message GetRequest {
- optional InternalHeader header = 6;
-
- repeated Reference key = 1;
- optional Transaction transaction = 2;
-
- optional int64 failover_ms = 3;
-
- optional bool strong = 4;
-
- optional bool allow_deferred = 5 [default=false];
-}
-
-message GetResponse {
- repeated group Entity = 1 {
- optional EntityProto entity = 2;
- optional Reference key = 4;
-
- optional int64 version = 3;
- }
-
- repeated Reference deferred = 5;
-
- optional bool in_order = 6 [default=true];
-}
-
-message PutRequest {
- optional InternalHeader header = 11;
-
- repeated EntityProto entity = 1;
- optional Transaction transaction = 2;
- repeated CompositeIndex composite_index = 3;
-
- optional bool trusted = 4 [default = false];
-
- optional bool force = 7 [default = false];
-
- optional bool mark_changes = 8 [default = false];
- repeated Snapshot snapshot = 9;
-
- enum AutoIdPolicy {
- CURRENT = 0;
- SEQUENTIAL = 1;
- }
- optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
-}
-
-message PutResponse {
- repeated Reference key = 1;
- optional Cost cost = 2;
- repeated int64 version = 3;
-}
-
-message TouchRequest {
- optional InternalHeader header = 10;
-
- repeated Reference key = 1;
- repeated CompositeIndex composite_index = 2;
- optional bool force = 3 [default = false];
- repeated Snapshot snapshot = 9;
-}
-
-message TouchResponse {
- optional Cost cost = 1;
-}
-
-message DeleteRequest {
- optional InternalHeader header = 10;
-
- repeated Reference key = 6;
- optional Transaction transaction = 5;
-
- optional bool trusted = 4 [default = false];
-
- optional bool force = 7 [default = false];
-
- optional bool mark_changes = 8 [default = false];
- repeated Snapshot snapshot = 9;
-}
-
-message DeleteResponse {
- optional Cost cost = 1;
- repeated int64 version = 3;
-}
-
-message NextRequest {
- optional InternalHeader header = 5;
-
- required Cursor cursor = 1;
- optional int32 count = 2;
-
- optional int32 offset = 4 [default = 0];
-
- optional bool compile = 3 [default = false];
-}
-
-message QueryResult {
- optional Cursor cursor = 1;
-
- repeated EntityProto result = 2;
-
- optional int32 skipped_results = 7;
-
- required bool more_results = 3;
-
- optional bool keys_only = 4;
-
- optional bool index_only = 9;
-
- optional bool small_ops = 10;
-
- optional CompiledQuery compiled_query = 5;
-
- optional CompiledCursor compiled_cursor = 6;
-
- repeated CompositeIndex index = 8;
-
- repeated int64 version = 11;
-}
-
-message AllocateIdsRequest {
- optional InternalHeader header = 4;
-
- optional Reference model_key = 1;
-
- optional int64 size = 2;
-
- optional int64 max = 3;
-
- repeated Reference reserve = 5;
-}
-
-message AllocateIdsResponse {
- required int64 start = 1;
- required int64 end = 2;
- optional Cost cost = 3;
-}
-
-message CompositeIndices {
- repeated CompositeIndex index = 1;
-}
-
-message AddActionsRequest {
- optional InternalHeader header = 3;
-
- required Transaction transaction = 1;
- repeated Action action = 2;
-}
-
-message AddActionsResponse {
-}
-
-message BeginTransactionRequest {
- optional InternalHeader header = 3;
-
- required string app = 1;
- optional bool allow_multiple_eg = 2 [default = false];
- optional string database_id = 4;
-
- enum TransactionMode {
- UNKNOWN = 0;
- READ_ONLY = 1;
- READ_WRITE = 2;
- }
- optional TransactionMode mode = 5 [default = UNKNOWN];
-
- optional Transaction previous_transaction = 7;
-}
-
-message CommitResponse {
- optional Cost cost = 1;
-
- repeated group Version = 3 {
- required Reference root_entity_key = 4;
- required int64 version = 5;
- }
-}
diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go
deleted file mode 100644
index 9b4134e42..000000000
--- a/vendor/google.golang.org/appengine/internal/identity.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-import (
- "os"
-
- netcontext "golang.org/x/net/context"
-)
-
-var (
- // This is set to true in identity_classic.go, which is behind the appengine build tag.
- // The appengine build tag is set for the first generation runtimes (<= Go 1.9) but not
- // the second generation runtimes (>= Go 1.11), so this indicates whether we're on a
- // first-gen runtime. See IsStandard below for the second-gen check.
- appengineStandard bool
-
- // This is set to true in identity_flex.go, which is behind the appenginevm build tag.
- appengineFlex bool
-)
-
-// AppID is the implementation of the wrapper function of the same name in
-// ../identity.go. See that file for commentary.
-func AppID(c netcontext.Context) string {
- return appID(FullyQualifiedAppID(c))
-}
-
-// IsStandard is the implementation of the wrapper function of the same name in
-// ../appengine.go. See that file for commentary.
-func IsStandard() bool {
- // appengineStandard will be true for first-gen runtimes (<= Go 1.9) but not
- // second-gen (>= Go 1.11).
- return appengineStandard || IsSecondGen()
-}
-
-// IsSecondGen is the implementation of the wrapper function of the same name in
-// ../appengine.go. See that file for commentary.
-func IsSecondGen() bool {
- // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime.
- return os.Getenv("GAE_ENV") == "standard"
-}
-
-// IsFlex is the implementation of the wrapper function of the same name in
-// ../appengine.go. See that file for commentary.
-func IsFlex() bool {
- return appengineFlex
-}
-
-// IsAppEngine is the implementation of the wrapper function of the same name in
-// ../appengine.go. See that file for commentary.
-func IsAppEngine() bool {
- return IsStandard() || IsFlex()
-}
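
The identity.go file removed above decides which App Engine environment the binary is running in: the appengine build tag marks first-generation standard runtimes, the appenginevm tag marks the flexible environment, and GAE_ENV=standard marks second-generation standard runtimes. A minimal usage sketch follows, assuming the public wrappers of the same names in the google.golang.org/appengine package (IsStandard, IsSecondGen, IsFlex):

package main

import (
	"fmt"

	"google.golang.org/appengine"
)

func main() {
	// IsStandard is true on both generations, so check IsSecondGen first
	// to tell them apart.
	switch {
	case appengine.IsSecondGen():
		fmt.Println("second-generation standard runtime (>= Go 1.11)")
	case appengine.IsStandard():
		fmt.Println("first-generation standard runtime (<= Go 1.9)")
	case appengine.IsFlex():
		fmt.Println("flexible environment")
	default:
		fmt.Println("not running on App Engine")
	}
}
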
diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go
deleted file mode 100644
index 4e979f45e..000000000
--- a/vendor/google.golang.org/appengine/internal/identity_classic.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package internal
-
-import (
- "appengine"
-
- netcontext "golang.org/x/net/context"
-)
-
-func init() {
- appengineStandard = true
-}
-
-func DefaultVersionHostname(ctx netcontext.Context) string {
- c := fromContext(ctx)
- if c == nil {
- panic(errNotAppEngineContext)
- }
- return appengine.DefaultVersionHostname(c)
-}
-
-func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
-func ServerSoftware() string { return appengine.ServerSoftware() }
-func InstanceID() string { return appengine.InstanceID() }
-func IsDevAppServer() bool { return appengine.IsDevAppServer() }
-
-func RequestID(ctx netcontext.Context) string {
- c := fromContext(ctx)
- if c == nil {
- panic(errNotAppEngineContext)
- }
- return appengine.RequestID(c)
-}
-
-func ModuleName(ctx netcontext.Context) string {
- c := fromContext(ctx)
- if c == nil {
- panic(errNotAppEngineContext)
- }
- return appengine.ModuleName(c)
-}
-func VersionID(ctx netcontext.Context) string {
- c := fromContext(ctx)
- if c == nil {
- panic(errNotAppEngineContext)
- }
- return appengine.VersionID(c)
-}
-
-func fullyQualifiedAppID(ctx netcontext.Context) string {
- c := fromContext(ctx)
- if c == nil {
- panic(errNotAppEngineContext)
- }
- return c.FullyQualifiedAppID()
-}
diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go
deleted file mode 100644
index d5e2e7b5e..000000000
--- a/vendor/google.golang.org/appengine/internal/identity_flex.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2018 Google LLC. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build appenginevm
-
-package internal
-
-func init() {
- appengineFlex = true
-}
diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go
deleted file mode 100644
index 5d8067263..000000000
--- a/vendor/google.golang.org/appengine/internal/identity_vm.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-
-package internal
-
-import (
- "log"
- "net/http"
- "os"
- "strings"
-
- netcontext "golang.org/x/net/context"
-)
-
-// These functions are implementations of the wrapper functions
-// in ../appengine/identity.go. See that file for commentary.
-
-const (
- hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
- hRequestLogId = "X-AppEngine-Request-Log-Id"
- hDatacenter = "X-AppEngine-Datacenter"
-)
-
-func ctxHeaders(ctx netcontext.Context) http.Header {
- c := fromContext(ctx)
- if c == nil {
- return nil
- }
- return c.Request().Header
-}
-
-func DefaultVersionHostname(ctx netcontext.Context) string {
- return ctxHeaders(ctx).Get(hDefaultVersionHostname)
-}
-
-func RequestID(ctx netcontext.Context) string {
- return ctxHeaders(ctx).Get(hRequestLogId)
-}
-
-func Datacenter(ctx netcontext.Context) string {
- if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" {
- return dc
- }
- // If the header isn't set, read zone from the metadata service.
- // It has the format projects/[NUMERIC_PROJECT_ID]/zones/[ZONE]
- zone, err := getMetadata("instance/zone")
- if err != nil {
- log.Printf("Datacenter: %v", err)
- return ""
- }
- parts := strings.Split(string(zone), "/")
- if len(parts) == 0 {
- return ""
- }
- return parts[len(parts)-1]
-}
-
-func ServerSoftware() string {
- // TODO(dsymonds): Remove fallback when we've verified this.
- if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
- return s
- }
- if s := os.Getenv("GAE_ENV"); s != "" {
- return s
- }
- return "Google App Engine/1.x.x"
-}
-
-// TODO(dsymonds): Remove the metadata fetches.
-
-func ModuleName(_ netcontext.Context) string {
- if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
- return s
- }
- if s := os.Getenv("GAE_SERVICE"); s != "" {
- return s
- }
- return string(mustGetMetadata("instance/attributes/gae_backend_name"))
-}
-
-func VersionID(_ netcontext.Context) string {
- if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
- return s1 + "." + s2
- }
- if s1, s2 := os.Getenv("GAE_VERSION"), os.Getenv("GAE_DEPLOYMENT_ID"); s1 != "" && s2 != "" {
- return s1 + "." + s2
- }
- return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
-}
-
-func InstanceID() string {
- if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
- return s
- }
- if s := os.Getenv("GAE_INSTANCE"); s != "" {
- return s
- }
- return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
-}
-
-func partitionlessAppID() string {
- // gae_project has everything except the partition prefix.
- if appID := os.Getenv("GAE_LONG_APP_ID"); appID != "" {
- return appID
- }
- if project := os.Getenv("GOOGLE_CLOUD_PROJECT"); project != "" {
- return project
- }
- return string(mustGetMetadata("instance/attributes/gae_project"))
-}
-
-func fullyQualifiedAppID(_ netcontext.Context) string {
- if s := os.Getenv("GAE_APPLICATION"); s != "" {
- return s
- }
- appID := partitionlessAppID()
-
- part := os.Getenv("GAE_PARTITION")
- if part == "" {
- part = string(mustGetMetadata("instance/attributes/gae_partition"))
- }
-
- if part != "" {
- appID = part + "~" + appID
- }
- return appID
-}
-
-func IsDevAppServer() bool {
- return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
-}
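
Most lookups in the identity_vm.go file removed above follow the same order: prefer a GAE_* environment variable and only then fall back to the GCE metadata server. A small sketch of that pattern in isolation; envOrMetadata is a hypothetical helper, while the metadata host and the Metadata-Flavor header follow the standard GCE metadata API conventions:

package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

// envOrMetadata prefers the named environment variable and falls back to the
// GCE metadata server, mirroring the lookup order used above. Hypothetical
// helper; the real code wraps this logic in getMetadata/mustGetMetadata.
func envOrMetadata(envKey, metadataPath string) (string, error) {
	if v := os.Getenv(envKey); v != "" {
		return v, nil
	}
	req, err := http.NewRequest("GET",
		"http://metadata.google.internal/computeMetadata/v1/"+metadataPath, nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("Metadata-Flavor", "Google")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	b, err := io.ReadAll(resp.Body)
	return string(b), err
}

func main() {
	svc, err := envOrMetadata("GAE_SERVICE", "instance/attributes/gae_backend_name")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("service:", svc)
}
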
diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go
deleted file mode 100644
index 051ea3980..000000000
--- a/vendor/google.golang.org/appengine/internal/internal.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// Package internal provides support for package appengine.
-//
-// Programs should not use this package directly. Its API is not stable.
-// Use packages appengine and appengine/* instead.
-package internal
-
-import (
- "fmt"
-
- "github.com/golang/protobuf/proto"
-
- remotepb "google.golang.org/appengine/internal/remote_api"
-)
-
-// errorCodeMaps is a map of service name to the error code map for the service.
-var errorCodeMaps = make(map[string]map[int32]string)
-
-// RegisterErrorCodeMap is called from API implementations to register their
-// error code map. This should only be called from init functions.
-func RegisterErrorCodeMap(service string, m map[int32]string) {
- errorCodeMaps[service] = m
-}
-
-type timeoutCodeKey struct {
- service string
- code int32
-}
-
-// timeoutCodes is the set of service+code pairs that represent timeouts.
-var timeoutCodes = make(map[timeoutCodeKey]bool)
-
-func RegisterTimeoutErrorCode(service string, code int32) {
- timeoutCodes[timeoutCodeKey{service, code}] = true
-}
-
-// APIError is the type returned by appengine.Context's Call method
-// when an API call fails in an API-specific way. This may be, for instance,
-// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
-type APIError struct {
- Service string
- Detail string
- Code int32 // API-specific error code
-}
-
-func (e *APIError) Error() string {
- if e.Code == 0 {
- if e.Detail == "" {
- return "APIError <empty>"
- }
- return e.Detail
- }
- s := fmt.Sprintf("API error %d", e.Code)
- if m, ok := errorCodeMaps[e.Service]; ok {
- s += " (" + e.Service + ": " + m[e.Code] + ")"
- } else {
- // Shouldn't happen, but provide a bit more detail if it does.
- s = e.Service + " " + s
- }
- if e.Detail != "" {
- s += ": " + e.Detail
- }
- return s
-}
-
-func (e *APIError) IsTimeout() bool {
- return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
-}
-
-// CallError is the type returned by appengine.Context's Call method when an
-// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
-type CallError struct {
- Detail string
- Code int32
- // TODO: Remove this if we get a distinguishable error code.
- Timeout bool
-}
-
-func (e *CallError) Error() string {
- var msg string
- switch remotepb.RpcError_ErrorCode(e.Code) {
- case remotepb.RpcError_UNKNOWN:
- return e.Detail
- case remotepb.RpcError_OVER_QUOTA:
- msg = "Over quota"
- case remotepb.RpcError_CAPABILITY_DISABLED:
- msg = "Capability disabled"
- case remotepb.RpcError_CANCELLED:
- msg = "Canceled"
- default:
- msg = fmt.Sprintf("Call error %d", e.Code)
- }
- s := msg + ": " + e.Detail
- if e.Timeout {
- s += " (timeout)"
- }
- return s
-}
-
-func (e *CallError) IsTimeout() bool {
- return e.Timeout
-}
-
-// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
-// The function should be prepared to be called on the same message more than once; it should only modify the
-// RPC request the first time.
-var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
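
The internal.go file removed above distinguishes APIError (an API-specific failure) from CallError (a generic RPC failure), both of which expose IsTimeout. A hedged sketch of how calling code might classify such errors; importing the internal package directly is shown only for illustration, since its own documentation warns against doing so:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/appengine/internal"
)

// classify reports whether err is an App Engine API error or a generic call
// error, and whether it represents a timeout.
func classify(err error) {
	var apiErr *internal.APIError
	if errors.As(err, &apiErr) {
		fmt.Printf("API error from %s (code %d), timeout=%v\n",
			apiErr.Service, apiErr.Code, apiErr.IsTimeout())
		return
	}
	var callErr *internal.CallError
	if errors.As(err, &callErr) {
		fmt.Printf("call error (code %d), timeout=%v\n",
			callErr.Code, callErr.IsTimeout())
		return
	}
	fmt.Println("unrelated error:", err)
}

func main() {
	classify(&internal.APIError{Service: "datastore_v3", Code: 5, Detail: "deadline exceeded"})
}
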
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
deleted file mode 100644
index 8545ac4ad..000000000
--- a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
+++ /dev/null
@@ -1,1313 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/log/log_service.proto
-
-package log
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type LogServiceError_ErrorCode int32
-
-const (
- LogServiceError_OK LogServiceError_ErrorCode = 0
- LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1
- LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2
-)
-
-var LogServiceError_ErrorCode_name = map[int32]string{
- 0: "OK",
- 1: "INVALID_REQUEST",
- 2: "STORAGE_ERROR",
-}
-var LogServiceError_ErrorCode_value = map[string]int32{
- "OK": 0,
- "INVALID_REQUEST": 1,
- "STORAGE_ERROR": 2,
-}
-
-func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode {
- p := new(LogServiceError_ErrorCode)
- *p = x
- return p
-}
-func (x LogServiceError_ErrorCode) String() string {
- return proto.EnumName(LogServiceError_ErrorCode_name, int32(x))
-}
-func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode")
- if err != nil {
- return err
- }
- *x = LogServiceError_ErrorCode(value)
- return nil
-}
-func (LogServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{0, 0}
-}
-
-type LogServiceError struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogServiceError) Reset() { *m = LogServiceError{} }
-func (m *LogServiceError) String() string { return proto.CompactTextString(m) }
-func (*LogServiceError) ProtoMessage() {}
-func (*LogServiceError) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{0}
-}
-func (m *LogServiceError) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogServiceError.Unmarshal(m, b)
-}
-func (m *LogServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogServiceError.Marshal(b, m, deterministic)
-}
-func (dst *LogServiceError) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogServiceError.Merge(dst, src)
-}
-func (m *LogServiceError) XXX_Size() int {
- return xxx_messageInfo_LogServiceError.Size(m)
-}
-func (m *LogServiceError) XXX_DiscardUnknown() {
- xxx_messageInfo_LogServiceError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogServiceError proto.InternalMessageInfo
-
-type UserAppLogLine struct {
- TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec,json=timestampUsec" json:"timestamp_usec,omitempty"`
- Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
- Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} }
-func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) }
-func (*UserAppLogLine) ProtoMessage() {}
-func (*UserAppLogLine) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{1}
-}
-func (m *UserAppLogLine) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_UserAppLogLine.Unmarshal(m, b)
-}
-func (m *UserAppLogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_UserAppLogLine.Marshal(b, m, deterministic)
-}
-func (dst *UserAppLogLine) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UserAppLogLine.Merge(dst, src)
-}
-func (m *UserAppLogLine) XXX_Size() int {
- return xxx_messageInfo_UserAppLogLine.Size(m)
-}
-func (m *UserAppLogLine) XXX_DiscardUnknown() {
- xxx_messageInfo_UserAppLogLine.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UserAppLogLine proto.InternalMessageInfo
-
-func (m *UserAppLogLine) GetTimestampUsec() int64 {
- if m != nil && m.TimestampUsec != nil {
- return *m.TimestampUsec
- }
- return 0
-}
-
-func (m *UserAppLogLine) GetLevel() int64 {
- if m != nil && m.Level != nil {
- return *m.Level
- }
- return 0
-}
-
-func (m *UserAppLogLine) GetMessage() string {
- if m != nil && m.Message != nil {
- return *m.Message
- }
- return ""
-}
-
-type UserAppLogGroup struct {
- LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line,json=logLine" json:"log_line,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} }
-func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) }
-func (*UserAppLogGroup) ProtoMessage() {}
-func (*UserAppLogGroup) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{2}
-}
-func (m *UserAppLogGroup) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_UserAppLogGroup.Unmarshal(m, b)
-}
-func (m *UserAppLogGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_UserAppLogGroup.Marshal(b, m, deterministic)
-}
-func (dst *UserAppLogGroup) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UserAppLogGroup.Merge(dst, src)
-}
-func (m *UserAppLogGroup) XXX_Size() int {
- return xxx_messageInfo_UserAppLogGroup.Size(m)
-}
-func (m *UserAppLogGroup) XXX_DiscardUnknown() {
- xxx_messageInfo_UserAppLogGroup.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UserAppLogGroup proto.InternalMessageInfo
-
-func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine {
- if m != nil {
- return m.LogLine
- }
- return nil
-}
-
-type FlushRequest struct {
- Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *FlushRequest) Reset() { *m = FlushRequest{} }
-func (m *FlushRequest) String() string { return proto.CompactTextString(m) }
-func (*FlushRequest) ProtoMessage() {}
-func (*FlushRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{3}
-}
-func (m *FlushRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_FlushRequest.Unmarshal(m, b)
-}
-func (m *FlushRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_FlushRequest.Marshal(b, m, deterministic)
-}
-func (dst *FlushRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlushRequest.Merge(dst, src)
-}
-func (m *FlushRequest) XXX_Size() int {
- return xxx_messageInfo_FlushRequest.Size(m)
-}
-func (m *FlushRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_FlushRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FlushRequest proto.InternalMessageInfo
-
-func (m *FlushRequest) GetLogs() []byte {
- if m != nil {
- return m.Logs
- }
- return nil
-}
-
-type SetStatusRequest struct {
- Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} }
-func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) }
-func (*SetStatusRequest) ProtoMessage() {}
-func (*SetStatusRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{4}
-}
-func (m *SetStatusRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SetStatusRequest.Unmarshal(m, b)
-}
-func (m *SetStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SetStatusRequest.Marshal(b, m, deterministic)
-}
-func (dst *SetStatusRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SetStatusRequest.Merge(dst, src)
-}
-func (m *SetStatusRequest) XXX_Size() int {
- return xxx_messageInfo_SetStatusRequest.Size(m)
-}
-func (m *SetStatusRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_SetStatusRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SetStatusRequest proto.InternalMessageInfo
-
-func (m *SetStatusRequest) GetStatus() string {
- if m != nil && m.Status != nil {
- return *m.Status
- }
- return ""
-}
-
-type LogOffset struct {
- RequestId []byte `protobuf:"bytes,1,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogOffset) Reset() { *m = LogOffset{} }
-func (m *LogOffset) String() string { return proto.CompactTextString(m) }
-func (*LogOffset) ProtoMessage() {}
-func (*LogOffset) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{5}
-}
-func (m *LogOffset) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogOffset.Unmarshal(m, b)
-}
-func (m *LogOffset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogOffset.Marshal(b, m, deterministic)
-}
-func (dst *LogOffset) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogOffset.Merge(dst, src)
-}
-func (m *LogOffset) XXX_Size() int {
- return xxx_messageInfo_LogOffset.Size(m)
-}
-func (m *LogOffset) XXX_DiscardUnknown() {
- xxx_messageInfo_LogOffset.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogOffset proto.InternalMessageInfo
-
-func (m *LogOffset) GetRequestId() []byte {
- if m != nil {
- return m.RequestId
- }
- return nil
-}
-
-type LogLine struct {
- Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"`
- Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
- LogMessage *string `protobuf:"bytes,3,req,name=log_message,json=logMessage" json:"log_message,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogLine) Reset() { *m = LogLine{} }
-func (m *LogLine) String() string { return proto.CompactTextString(m) }
-func (*LogLine) ProtoMessage() {}
-func (*LogLine) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{6}
-}
-func (m *LogLine) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogLine.Unmarshal(m, b)
-}
-func (m *LogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogLine.Marshal(b, m, deterministic)
-}
-func (dst *LogLine) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogLine.Merge(dst, src)
-}
-func (m *LogLine) XXX_Size() int {
- return xxx_messageInfo_LogLine.Size(m)
-}
-func (m *LogLine) XXX_DiscardUnknown() {
- xxx_messageInfo_LogLine.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogLine proto.InternalMessageInfo
-
-func (m *LogLine) GetTime() int64 {
- if m != nil && m.Time != nil {
- return *m.Time
- }
- return 0
-}
-
-func (m *LogLine) GetLevel() int32 {
- if m != nil && m.Level != nil {
- return *m.Level
- }
- return 0
-}
-
-func (m *LogLine) GetLogMessage() string {
- if m != nil && m.LogMessage != nil {
- return *m.LogMessage
- }
- return ""
-}
-
-type RequestLog struct {
- AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
- ModuleId *string `protobuf:"bytes,37,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"`
- VersionId *string `protobuf:"bytes,2,req,name=version_id,json=versionId" json:"version_id,omitempty"`
- RequestId []byte `protobuf:"bytes,3,req,name=request_id,json=requestId" json:"request_id,omitempty"`
- Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"`
- Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"`
- Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"`
- StartTime *int64 `protobuf:"varint,6,req,name=start_time,json=startTime" json:"start_time,omitempty"`
- EndTime *int64 `protobuf:"varint,7,req,name=end_time,json=endTime" json:"end_time,omitempty"`
- Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"`
- Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"`
- Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"`
- Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"`
- HttpVersion *string `protobuf:"bytes,12,req,name=http_version,json=httpVersion" json:"http_version,omitempty"`
- Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"`
- ResponseSize *int64 `protobuf:"varint,14,req,name=response_size,json=responseSize" json:"response_size,omitempty"`
- Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"`
- UserAgent *string `protobuf:"bytes,16,opt,name=user_agent,json=userAgent" json:"user_agent,omitempty"`
- UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry,json=urlMapEntry" json:"url_map_entry,omitempty"`
- Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"`
- ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles,json=apiMcycles" json:"api_mcycles,omitempty"`
- Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"`
- Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"`
- TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name,json=taskQueueName" json:"task_queue_name,omitempty"`
- TaskName *string `protobuf:"bytes,23,opt,name=task_name,json=taskName" json:"task_name,omitempty"`
- WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request,json=wasLoadingRequest" json:"was_loading_request,omitempty"`
- PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time,json=pendingTime" json:"pending_time,omitempty"`
- ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,json=replicaIndex,def=-1" json:"replica_index,omitempty"`
- Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"`
- CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key,json=cloneKey" json:"clone_key,omitempty"`
- Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"`
- LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete,json=linesIncomplete" json:"lines_incomplete,omitempty"`
- AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release,json=appEngineRelease" json:"app_engine_release,omitempty"`
- ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason,json=exitReason" json:"exit_reason,omitempty"`
- WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time,json=wasThrottledForTime" json:"was_throttled_for_time,omitempty"`
- WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests,json=wasThrottledForRequests" json:"was_throttled_for_requests,omitempty"`
- ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"`
- ServerName []byte `protobuf:"bytes,34,opt,name=server_name,json=serverName" json:"server_name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RequestLog) Reset() { *m = RequestLog{} }
-func (m *RequestLog) String() string { return proto.CompactTextString(m) }
-func (*RequestLog) ProtoMessage() {}
-func (*RequestLog) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{7}
-}
-func (m *RequestLog) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_RequestLog.Unmarshal(m, b)
-}
-func (m *RequestLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_RequestLog.Marshal(b, m, deterministic)
-}
-func (dst *RequestLog) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RequestLog.Merge(dst, src)
-}
-func (m *RequestLog) XXX_Size() int {
- return xxx_messageInfo_RequestLog.Size(m)
-}
-func (m *RequestLog) XXX_DiscardUnknown() {
- xxx_messageInfo_RequestLog.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RequestLog proto.InternalMessageInfo
-
-const Default_RequestLog_ModuleId string = "default"
-const Default_RequestLog_ReplicaIndex int32 = -1
-const Default_RequestLog_Finished bool = true
-
-func (m *RequestLog) GetAppId() string {
- if m != nil && m.AppId != nil {
- return *m.AppId
- }
- return ""
-}
-
-func (m *RequestLog) GetModuleId() string {
- if m != nil && m.ModuleId != nil {
- return *m.ModuleId
- }
- return Default_RequestLog_ModuleId
-}
-
-func (m *RequestLog) GetVersionId() string {
- if m != nil && m.VersionId != nil {
- return *m.VersionId
- }
- return ""
-}
-
-func (m *RequestLog) GetRequestId() []byte {
- if m != nil {
- return m.RequestId
- }
- return nil
-}
-
-func (m *RequestLog) GetOffset() *LogOffset {
- if m != nil {
- return m.Offset
- }
- return nil
-}
-
-func (m *RequestLog) GetIp() string {
- if m != nil && m.Ip != nil {
- return *m.Ip
- }
- return ""
-}
-
-func (m *RequestLog) GetNickname() string {
- if m != nil && m.Nickname != nil {
- return *m.Nickname
- }
- return ""
-}
-
-func (m *RequestLog) GetStartTime() int64 {
- if m != nil && m.StartTime != nil {
- return *m.StartTime
- }
- return 0
-}
-
-func (m *RequestLog) GetEndTime() int64 {
- if m != nil && m.EndTime != nil {
- return *m.EndTime
- }
- return 0
-}
-
-func (m *RequestLog) GetLatency() int64 {
- if m != nil && m.Latency != nil {
- return *m.Latency
- }
- return 0
-}
-
-func (m *RequestLog) GetMcycles() int64 {
- if m != nil && m.Mcycles != nil {
- return *m.Mcycles
- }
- return 0
-}
-
-func (m *RequestLog) GetMethod() string {
- if m != nil && m.Method != nil {
- return *m.Method
- }
- return ""
-}
-
-func (m *RequestLog) GetResource() string {
- if m != nil && m.Resource != nil {
- return *m.Resource
- }
- return ""
-}
-
-func (m *RequestLog) GetHttpVersion() string {
- if m != nil && m.HttpVersion != nil {
- return *m.HttpVersion
- }
- return ""
-}
-
-func (m *RequestLog) GetStatus() int32 {
- if m != nil && m.Status != nil {
- return *m.Status
- }
- return 0
-}
-
-func (m *RequestLog) GetResponseSize() int64 {
- if m != nil && m.ResponseSize != nil {
- return *m.ResponseSize
- }
- return 0
-}
-
-func (m *RequestLog) GetReferrer() string {
- if m != nil && m.Referrer != nil {
- return *m.Referrer
- }
- return ""
-}
-
-func (m *RequestLog) GetUserAgent() string {
- if m != nil && m.UserAgent != nil {
- return *m.UserAgent
- }
- return ""
-}
-
-func (m *RequestLog) GetUrlMapEntry() string {
- if m != nil && m.UrlMapEntry != nil {
- return *m.UrlMapEntry
- }
- return ""
-}
-
-func (m *RequestLog) GetCombined() string {
- if m != nil && m.Combined != nil {
- return *m.Combined
- }
- return ""
-}
-
-func (m *RequestLog) GetApiMcycles() int64 {
- if m != nil && m.ApiMcycles != nil {
- return *m.ApiMcycles
- }
- return 0
-}
-
-func (m *RequestLog) GetHost() string {
- if m != nil && m.Host != nil {
- return *m.Host
- }
- return ""
-}
-
-func (m *RequestLog) GetCost() float64 {
- if m != nil && m.Cost != nil {
- return *m.Cost
- }
- return 0
-}
-
-func (m *RequestLog) GetTaskQueueName() string {
- if m != nil && m.TaskQueueName != nil {
- return *m.TaskQueueName
- }
- return ""
-}
-
-func (m *RequestLog) GetTaskName() string {
- if m != nil && m.TaskName != nil {
- return *m.TaskName
- }
- return ""
-}
-
-func (m *RequestLog) GetWasLoadingRequest() bool {
- if m != nil && m.WasLoadingRequest != nil {
- return *m.WasLoadingRequest
- }
- return false
-}
-
-func (m *RequestLog) GetPendingTime() int64 {
- if m != nil && m.PendingTime != nil {
- return *m.PendingTime
- }
- return 0
-}
-
-func (m *RequestLog) GetReplicaIndex() int32 {
- if m != nil && m.ReplicaIndex != nil {
- return *m.ReplicaIndex
- }
- return Default_RequestLog_ReplicaIndex
-}
-
-func (m *RequestLog) GetFinished() bool {
- if m != nil && m.Finished != nil {
- return *m.Finished
- }
- return Default_RequestLog_Finished
-}
-
-func (m *RequestLog) GetCloneKey() []byte {
- if m != nil {
- return m.CloneKey
- }
- return nil
-}
-
-func (m *RequestLog) GetLine() []*LogLine {
- if m != nil {
- return m.Line
- }
- return nil
-}
-
-func (m *RequestLog) GetLinesIncomplete() bool {
- if m != nil && m.LinesIncomplete != nil {
- return *m.LinesIncomplete
- }
- return false
-}
-
-func (m *RequestLog) GetAppEngineRelease() []byte {
- if m != nil {
- return m.AppEngineRelease
- }
- return nil
-}
-
-func (m *RequestLog) GetExitReason() int32 {
- if m != nil && m.ExitReason != nil {
- return *m.ExitReason
- }
- return 0
-}
-
-func (m *RequestLog) GetWasThrottledForTime() bool {
- if m != nil && m.WasThrottledForTime != nil {
- return *m.WasThrottledForTime
- }
- return false
-}
-
-func (m *RequestLog) GetWasThrottledForRequests() bool {
- if m != nil && m.WasThrottledForRequests != nil {
- return *m.WasThrottledForRequests
- }
- return false
-}
-
-func (m *RequestLog) GetThrottledTime() int64 {
- if m != nil && m.ThrottledTime != nil {
- return *m.ThrottledTime
- }
- return 0
-}
-
-func (m *RequestLog) GetServerName() []byte {
- if m != nil {
- return m.ServerName
- }
- return nil
-}
-
-type LogModuleVersion struct {
- ModuleId *string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"`
- VersionId *string `protobuf:"bytes,2,opt,name=version_id,json=versionId" json:"version_id,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} }
-func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) }
-func (*LogModuleVersion) ProtoMessage() {}
-func (*LogModuleVersion) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{8}
-}
-func (m *LogModuleVersion) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogModuleVersion.Unmarshal(m, b)
-}
-func (m *LogModuleVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogModuleVersion.Marshal(b, m, deterministic)
-}
-func (dst *LogModuleVersion) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogModuleVersion.Merge(dst, src)
-}
-func (m *LogModuleVersion) XXX_Size() int {
- return xxx_messageInfo_LogModuleVersion.Size(m)
-}
-func (m *LogModuleVersion) XXX_DiscardUnknown() {
- xxx_messageInfo_LogModuleVersion.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogModuleVersion proto.InternalMessageInfo
-
-const Default_LogModuleVersion_ModuleId string = "default"
-
-func (m *LogModuleVersion) GetModuleId() string {
- if m != nil && m.ModuleId != nil {
- return *m.ModuleId
- }
- return Default_LogModuleVersion_ModuleId
-}
-
-func (m *LogModuleVersion) GetVersionId() string {
- if m != nil && m.VersionId != nil {
- return *m.VersionId
- }
- return ""
-}
-
-type LogReadRequest struct {
- AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
- VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"`
- ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version,json=moduleVersion" json:"module_version,omitempty"`
- StartTime *int64 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
- EndTime *int64 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
- Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"`
- RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id,json=requestId" json:"request_id,omitempty"`
- MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level,json=minimumLogLevel" json:"minimum_log_level,omitempty"`
- IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete,json=includeIncomplete" json:"include_incomplete,omitempty"`
- Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"`
- CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex,json=combinedLogRegex" json:"combined_log_regex,omitempty"`
- HostRegex *string `protobuf:"bytes,15,opt,name=host_regex,json=hostRegex" json:"host_regex,omitempty"`
- ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index,json=replicaIndex" json:"replica_index,omitempty"`
- IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs,json=includeAppLogs" json:"include_app_logs,omitempty"`
- AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request,json=appLogsPerRequest" json:"app_logs_per_request,omitempty"`
- IncludeHost *bool `protobuf:"varint,11,opt,name=include_host,json=includeHost" json:"include_host,omitempty"`
- IncludeAll *bool `protobuf:"varint,12,opt,name=include_all,json=includeAll" json:"include_all,omitempty"`
- CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator,json=cacheIterator" json:"cache_iterator,omitempty"`
- NumShards *int32 `protobuf:"varint,18,opt,name=num_shards,json=numShards" json:"num_shards,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogReadRequest) Reset() { *m = LogReadRequest{} }
-func (m *LogReadRequest) String() string { return proto.CompactTextString(m) }
-func (*LogReadRequest) ProtoMessage() {}
-func (*LogReadRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{9}
-}
-func (m *LogReadRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogReadRequest.Unmarshal(m, b)
-}
-func (m *LogReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogReadRequest.Marshal(b, m, deterministic)
-}
-func (dst *LogReadRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogReadRequest.Merge(dst, src)
-}
-func (m *LogReadRequest) XXX_Size() int {
- return xxx_messageInfo_LogReadRequest.Size(m)
-}
-func (m *LogReadRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LogReadRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogReadRequest proto.InternalMessageInfo
-
-func (m *LogReadRequest) GetAppId() string {
- if m != nil && m.AppId != nil {
- return *m.AppId
- }
- return ""
-}
-
-func (m *LogReadRequest) GetVersionId() []string {
- if m != nil {
- return m.VersionId
- }
- return nil
-}
-
-func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion {
- if m != nil {
- return m.ModuleVersion
- }
- return nil
-}
-
-func (m *LogReadRequest) GetStartTime() int64 {
- if m != nil && m.StartTime != nil {
- return *m.StartTime
- }
- return 0
-}
-
-func (m *LogReadRequest) GetEndTime() int64 {
- if m != nil && m.EndTime != nil {
- return *m.EndTime
- }
- return 0
-}
-
-func (m *LogReadRequest) GetOffset() *LogOffset {
- if m != nil {
- return m.Offset
- }
- return nil
-}
-
-func (m *LogReadRequest) GetRequestId() [][]byte {
- if m != nil {
- return m.RequestId
- }
- return nil
-}
-
-func (m *LogReadRequest) GetMinimumLogLevel() int32 {
- if m != nil && m.MinimumLogLevel != nil {
- return *m.MinimumLogLevel
- }
- return 0
-}
-
-func (m *LogReadRequest) GetIncludeIncomplete() bool {
- if m != nil && m.IncludeIncomplete != nil {
- return *m.IncludeIncomplete
- }
- return false
-}
-
-func (m *LogReadRequest) GetCount() int64 {
- if m != nil && m.Count != nil {
- return *m.Count
- }
- return 0
-}
-
-func (m *LogReadRequest) GetCombinedLogRegex() string {
- if m != nil && m.CombinedLogRegex != nil {
- return *m.CombinedLogRegex
- }
- return ""
-}
-
-func (m *LogReadRequest) GetHostRegex() string {
- if m != nil && m.HostRegex != nil {
- return *m.HostRegex
- }
- return ""
-}
-
-func (m *LogReadRequest) GetReplicaIndex() int32 {
- if m != nil && m.ReplicaIndex != nil {
- return *m.ReplicaIndex
- }
- return 0
-}
-
-func (m *LogReadRequest) GetIncludeAppLogs() bool {
- if m != nil && m.IncludeAppLogs != nil {
- return *m.IncludeAppLogs
- }
- return false
-}
-
-func (m *LogReadRequest) GetAppLogsPerRequest() int32 {
- if m != nil && m.AppLogsPerRequest != nil {
- return *m.AppLogsPerRequest
- }
- return 0
-}
-
-func (m *LogReadRequest) GetIncludeHost() bool {
- if m != nil && m.IncludeHost != nil {
- return *m.IncludeHost
- }
- return false
-}
-
-func (m *LogReadRequest) GetIncludeAll() bool {
- if m != nil && m.IncludeAll != nil {
- return *m.IncludeAll
- }
- return false
-}
-
-func (m *LogReadRequest) GetCacheIterator() bool {
- if m != nil && m.CacheIterator != nil {
- return *m.CacheIterator
- }
- return false
-}
-
-func (m *LogReadRequest) GetNumShards() int32 {
- if m != nil && m.NumShards != nil {
- return *m.NumShards
- }
- return 0
-}
-
-type LogReadResponse struct {
- Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"`
- Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"`
- LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time,json=lastEndTime" json:"last_end_time,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogReadResponse) Reset() { *m = LogReadResponse{} }
-func (m *LogReadResponse) String() string { return proto.CompactTextString(m) }
-func (*LogReadResponse) ProtoMessage() {}
-func (*LogReadResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{10}
-}
-func (m *LogReadResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogReadResponse.Unmarshal(m, b)
-}
-func (m *LogReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogReadResponse.Marshal(b, m, deterministic)
-}
-func (dst *LogReadResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogReadResponse.Merge(dst, src)
-}
-func (m *LogReadResponse) XXX_Size() int {
- return xxx_messageInfo_LogReadResponse.Size(m)
-}
-func (m *LogReadResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LogReadResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogReadResponse proto.InternalMessageInfo
-
-func (m *LogReadResponse) GetLog() []*RequestLog {
- if m != nil {
- return m.Log
- }
- return nil
-}
-
-func (m *LogReadResponse) GetOffset() *LogOffset {
- if m != nil {
- return m.Offset
- }
- return nil
-}
-
-func (m *LogReadResponse) GetLastEndTime() int64 {
- if m != nil && m.LastEndTime != nil {
- return *m.LastEndTime
- }
- return 0
-}
-
-type LogUsageRecord struct {
- VersionId *string `protobuf:"bytes,1,opt,name=version_id,json=versionId" json:"version_id,omitempty"`
- StartTime *int32 `protobuf:"varint,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
- EndTime *int32 `protobuf:"varint,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
- Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
- TotalSize *int64 `protobuf:"varint,5,opt,name=total_size,json=totalSize" json:"total_size,omitempty"`
- Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} }
-func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) }
-func (*LogUsageRecord) ProtoMessage() {}
-func (*LogUsageRecord) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{11}
-}
-func (m *LogUsageRecord) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogUsageRecord.Unmarshal(m, b)
-}
-func (m *LogUsageRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogUsageRecord.Marshal(b, m, deterministic)
-}
-func (dst *LogUsageRecord) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogUsageRecord.Merge(dst, src)
-}
-func (m *LogUsageRecord) XXX_Size() int {
- return xxx_messageInfo_LogUsageRecord.Size(m)
-}
-func (m *LogUsageRecord) XXX_DiscardUnknown() {
- xxx_messageInfo_LogUsageRecord.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogUsageRecord proto.InternalMessageInfo
-
-func (m *LogUsageRecord) GetVersionId() string {
- if m != nil && m.VersionId != nil {
- return *m.VersionId
- }
- return ""
-}
-
-func (m *LogUsageRecord) GetStartTime() int32 {
- if m != nil && m.StartTime != nil {
- return *m.StartTime
- }
- return 0
-}
-
-func (m *LogUsageRecord) GetEndTime() int32 {
- if m != nil && m.EndTime != nil {
- return *m.EndTime
- }
- return 0
-}
-
-func (m *LogUsageRecord) GetCount() int64 {
- if m != nil && m.Count != nil {
- return *m.Count
- }
- return 0
-}
-
-func (m *LogUsageRecord) GetTotalSize() int64 {
- if m != nil && m.TotalSize != nil {
- return *m.TotalSize
- }
- return 0
-}
-
-func (m *LogUsageRecord) GetRecords() int32 {
- if m != nil && m.Records != nil {
- return *m.Records
- }
- return 0
-}
-
-type LogUsageRequest struct {
- AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
- VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"`
- StartTime *int32 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
- EndTime *int32 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
- ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,json=resolutionHours,def=1" json:"resolution_hours,omitempty"`
- CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions,json=combineVersions" json:"combine_versions,omitempty"`
- UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version,json=usageVersion" json:"usage_version,omitempty"`
- VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only,json=versionsOnly" json:"versions_only,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} }
-func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) }
-func (*LogUsageRequest) ProtoMessage() {}
-func (*LogUsageRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{12}
-}
-func (m *LogUsageRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogUsageRequest.Unmarshal(m, b)
-}
-func (m *LogUsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogUsageRequest.Marshal(b, m, deterministic)
-}
-func (dst *LogUsageRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogUsageRequest.Merge(dst, src)
-}
-func (m *LogUsageRequest) XXX_Size() int {
- return xxx_messageInfo_LogUsageRequest.Size(m)
-}
-func (m *LogUsageRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LogUsageRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogUsageRequest proto.InternalMessageInfo
-
-const Default_LogUsageRequest_ResolutionHours uint32 = 1
-
-func (m *LogUsageRequest) GetAppId() string {
- if m != nil && m.AppId != nil {
- return *m.AppId
- }
- return ""
-}
-
-func (m *LogUsageRequest) GetVersionId() []string {
- if m != nil {
- return m.VersionId
- }
- return nil
-}
-
-func (m *LogUsageRequest) GetStartTime() int32 {
- if m != nil && m.StartTime != nil {
- return *m.StartTime
- }
- return 0
-}
-
-func (m *LogUsageRequest) GetEndTime() int32 {
- if m != nil && m.EndTime != nil {
- return *m.EndTime
- }
- return 0
-}
-
-func (m *LogUsageRequest) GetResolutionHours() uint32 {
- if m != nil && m.ResolutionHours != nil {
- return *m.ResolutionHours
- }
- return Default_LogUsageRequest_ResolutionHours
-}
-
-func (m *LogUsageRequest) GetCombineVersions() bool {
- if m != nil && m.CombineVersions != nil {
- return *m.CombineVersions
- }
- return false
-}
-
-func (m *LogUsageRequest) GetUsageVersion() int32 {
- if m != nil && m.UsageVersion != nil {
- return *m.UsageVersion
- }
- return 0
-}
-
-func (m *LogUsageRequest) GetVersionsOnly() bool {
- if m != nil && m.VersionsOnly != nil {
- return *m.VersionsOnly
- }
- return false
-}
-
-type LogUsageResponse struct {
- Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"`
- Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} }
-func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) }
-func (*LogUsageResponse) ProtoMessage() {}
-func (*LogUsageResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{13}
-}
-func (m *LogUsageResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogUsageResponse.Unmarshal(m, b)
-}
-func (m *LogUsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogUsageResponse.Marshal(b, m, deterministic)
-}
-func (dst *LogUsageResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogUsageResponse.Merge(dst, src)
-}
-func (m *LogUsageResponse) XXX_Size() int {
- return xxx_messageInfo_LogUsageResponse.Size(m)
-}
-func (m *LogUsageResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LogUsageResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogUsageResponse proto.InternalMessageInfo
-
-func (m *LogUsageResponse) GetUsage() []*LogUsageRecord {
- if m != nil {
- return m.Usage
- }
- return nil
-}
-
-func (m *LogUsageResponse) GetSummary() *LogUsageRecord {
- if m != nil {
- return m.Summary
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*LogServiceError)(nil), "appengine.LogServiceError")
- proto.RegisterType((*UserAppLogLine)(nil), "appengine.UserAppLogLine")
- proto.RegisterType((*UserAppLogGroup)(nil), "appengine.UserAppLogGroup")
- proto.RegisterType((*FlushRequest)(nil), "appengine.FlushRequest")
- proto.RegisterType((*SetStatusRequest)(nil), "appengine.SetStatusRequest")
- proto.RegisterType((*LogOffset)(nil), "appengine.LogOffset")
- proto.RegisterType((*LogLine)(nil), "appengine.LogLine")
- proto.RegisterType((*RequestLog)(nil), "appengine.RequestLog")
- proto.RegisterType((*LogModuleVersion)(nil), "appengine.LogModuleVersion")
- proto.RegisterType((*LogReadRequest)(nil), "appengine.LogReadRequest")
- proto.RegisterType((*LogReadResponse)(nil), "appengine.LogReadResponse")
- proto.RegisterType((*LogUsageRecord)(nil), "appengine.LogUsageRecord")
- proto.RegisterType((*LogUsageRequest)(nil), "appengine.LogUsageRequest")
- proto.RegisterType((*LogUsageResponse)(nil), "appengine.LogUsageResponse")
-}
-
-func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/log/log_service.proto", fileDescriptor_log_service_f054fd4b5012319d)
-}
-
-var fileDescriptor_log_service_f054fd4b5012319d = []byte{
- // 1553 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x72, 0xdb, 0xc6,
- 0x15, 0x2e, 0x48, 0x51, 0x24, 0x0f, 0x49, 0x91, 0x5a, 0xcb, 0xce, 0xda, 0xae, 0x6b, 0x1a, 0x4e,
- 0x1c, 0xd6, 0x93, 0x48, 0x93, 0xa4, 0x57, 0xca, 0x95, 0xd3, 0x2a, 0x8e, 0x26, 0xb4, 0xd5, 0x40,
- 0x72, 0x3a, 0xd3, 0x1b, 0x0c, 0x0a, 0x1c, 0x81, 0x18, 0x2f, 0xb1, 0xc8, 0xee, 0xc2, 0x91, 0x72,
- 0xdb, 0xdb, 0x3e, 0x46, 0x1f, 0xa2, 0xaf, 0xd2, 0xb7, 0xe9, 0xec, 0xd9, 0x05, 0x44, 0x2a, 0x4d,
- 0xc6, 0x33, 0xb9, 0xe0, 0x10, 0xfb, 0x9d, 0x83, 0xdd, 0xf3, 0xf3, 0x9d, 0x6f, 0x01, 0xc7, 0xb9,
- 0x94, 0xb9, 0xc0, 0xc3, 0x5c, 0x8a, 0xa4, 0xcc, 0x0f, 0xa5, 0xca, 0x8f, 0x92, 0xaa, 0xc2, 0x32,
- 0x2f, 0x4a, 0x3c, 0x2a, 0x4a, 0x83, 0xaa, 0x4c, 0xc4, 0x91, 0x90, 0xb9, 0xfd, 0xc5, 0x1a, 0xd5,
- 0xbb, 0x22, 0xc5, 0xc3, 0x4a, 0x49, 0x23, 0xd9, 0xb0, 0xf5, 0x0c, 0x5f, 0xc3, 0x74, 0x29, 0xf3,
- 0x73, 0x67, 0x3e, 0x51, 0x4a, 0xaa, 0xf0, 0x4b, 0x18, 0xd2, 0xc3, 0x9f, 0x65, 0x86, 0x6c, 0x17,
- 0x3a, 0x67, 0xdf, 0xce, 0x7e, 0xc7, 0xee, 0xc0, 0xf4, 0xf4, 0xf5, 0xf7, 0x2f, 0x96, 0xa7, 0x7f,
- 0x89, 0xa3, 0x93, 0xef, 0xde, 0x9c, 0x9c, 0x5f, 0xcc, 0x02, 0xb6, 0x0f, 0x93, 0xf3, 0x8b, 0xb3,
- 0xe8, 0xc5, 0xcb, 0x93, 0xf8, 0x24, 0x8a, 0xce, 0xa2, 0x59, 0x27, 0xcc, 0x61, 0xef, 0x8d, 0x46,
- 0xf5, 0xa2, 0xaa, 0x96, 0x32, 0x5f, 0x16, 0x25, 0xb2, 0x8f, 0x60, 0xcf, 0x14, 0x6b, 0xd4, 0x26,
- 0x59, 0x57, 0x71, 0xad, 0x31, 0xe5, 0xc1, 0xbc, 0xb3, 0xe8, 0x46, 0x93, 0x16, 0x7d, 0xa3, 0x31,
- 0x65, 0x07, 0xd0, 0x13, 0xf8, 0x0e, 0x05, 0xef, 0x90, 0xd5, 0x2d, 0x18, 0x87, 0xfe, 0x1a, 0xb5,
- 0x4e, 0x72, 0xe4, 0xdd, 0x79, 0x67, 0x31, 0x8c, 0x9a, 0x65, 0xf8, 0x12, 0xa6, 0x37, 0x07, 0xbd,
- 0x54, 0xb2, 0xae, 0xd8, 0x9f, 0x60, 0x60, 0x73, 0x15, 0x45, 0x89, 0xbc, 0x33, 0xef, 0x2e, 0x46,
- 0x9f, 0xdf, 0x3f, 0x6c, 0x33, 0x3d, 0xdc, 0x0e, 0x2b, 0xea, 0x0b, 0xf7, 0x10, 0x86, 0x30, 0xfe,
- 0x5a, 0xd4, 0x7a, 0x15, 0xe1, 0x0f, 0x35, 0x6a, 0xc3, 0x18, 0xec, 0x08, 0x99, 0x6b, 0x1e, 0xcc,
- 0x83, 0xc5, 0x38, 0xa2, 0xe7, 0xf0, 0x39, 0xcc, 0xce, 0xd1, 0x9c, 0x9b, 0xc4, 0xd4, 0xba, 0xf1,
- 0xbb, 0x07, 0xbb, 0x9a, 0x00, 0xca, 0x67, 0x18, 0xf9, 0x55, 0xf8, 0x1c, 0x86, 0x4b, 0x99, 0x9f,
- 0x5d, 0x5e, 0x6a, 0x34, 0xec, 0x11, 0x80, 0x72, 0xfe, 0x71, 0x91, 0xf9, 0x2d, 0x87, 0x1e, 0x39,
- 0xcd, 0xc2, 0x0b, 0xe8, 0x37, 0x65, 0x62, 0xb0, 0x63, 0x0b, 0xe2, 0x8b, 0x43, 0xcf, 0xdb, 0x35,
- 0xe9, 0x35, 0x35, 0x79, 0x0c, 0x23, 0x9b, 0xe6, 0x76, 0x5d, 0x40, 0xc8, 0xfc, 0x95, 0x2f, 0xcd,
- 0x3f, 0x01, 0xc0, 0x47, 0xb9, 0x94, 0x39, 0xbb, 0x0b, 0xbb, 0x49, 0x55, 0xb9, 0xf3, 0xad, 0x6b,
- 0x2f, 0xa9, 0xaa, 0xd3, 0x8c, 0x7d, 0x08, 0xc3, 0xb5, 0xcc, 0x6a, 0x81, 0xd6, 0xf2, 0xd1, 0x3c,
- 0x58, 0x0c, 0x8f, 0xfb, 0x19, 0x5e, 0x26, 0xb5, 0x30, 0xd1, 0xc0, 0x59, 0x4e, 0x33, 0x9b, 0xc0,
- 0x3b, 0x54, 0xba, 0x90, 0xa5, 0x75, 0xeb, 0xd0, 0x06, 0x43, 0x8f, 0x38, 0xf3, 0x46, 0x7e, 0x36,
- 0x94, 0xcd, 0xfc, 0xd8, 0x27, 0xb0, 0x2b, 0xa9, 0x10, 0xfc, 0xe9, 0x3c, 0x58, 0x8c, 0x3e, 0x3f,
- 0xd8, 0xe8, 0x47, 0x5b, 0xa4, 0xc8, 0xfb, 0xb0, 0x3d, 0xe8, 0x14, 0x15, 0xdf, 0xa1, 0x33, 0x3a,
- 0x45, 0xc5, 0x1e, 0xc0, 0xa0, 0x2c, 0xd2, 0xb7, 0x65, 0xb2, 0x46, 0xde, 0xb3, 0x01, 0x46, 0xed,
- 0xda, 0x1e, 0xac, 0x4d, 0xa2, 0x4c, 0x4c, 0x45, 0xdb, 0xa5, 0xa2, 0x0d, 0x09, 0xb9, 0xb0, 0x95,
- 0xbb, 0x0f, 0x03, 0x2c, 0x33, 0x67, 0xec, 0x93, 0xb1, 0x8f, 0x65, 0x46, 0x26, 0x0e, 0x7d, 0x91,
- 0x18, 0x2c, 0xd3, 0x6b, 0x3e, 0x70, 0x16, 0xbf, 0x24, 0xb2, 0xa5, 0xd7, 0xa9, 0x40, 0xcd, 0x87,
- 0xce, 0xe2, 0x97, 0xb6, 0xd7, 0x6b, 0x34, 0x2b, 0x99, 0x71, 0x70, 0xbd, 0x76, 0x2b, 0x1b, 0xa1,
- 0x42, 0x2d, 0x6b, 0x95, 0x22, 0x1f, 0x91, 0xa5, 0x5d, 0xb3, 0x27, 0x30, 0x5e, 0x19, 0x53, 0xc5,
- 0xbe, 0x58, 0x7c, 0x4c, 0xf6, 0x91, 0xc5, 0xbe, 0x77, 0xd0, 0x06, 0x85, 0x26, 0xd4, 0x60, 0xbf,
- 0x62, 0x4f, 0x61, 0xa2, 0x50, 0x57, 0xb2, 0xd4, 0x18, 0xeb, 0xe2, 0x27, 0xe4, 0x7b, 0x14, 0xce,
- 0xb8, 0x01, 0xcf, 0x8b, 0x9f, 0xd0, 0x9d, 0x7d, 0x89, 0x4a, 0xa1, 0xe2, 0x53, 0x57, 0x9d, 0x66,
- 0x6d, 0xab, 0x53, 0x6b, 0x54, 0x71, 0x92, 0x63, 0x69, 0xf8, 0x8c, 0xac, 0x43, 0x8b, 0xbc, 0xb0,
- 0x00, 0x0b, 0x61, 0x52, 0x2b, 0x11, 0xaf, 0x93, 0x2a, 0xc6, 0xd2, 0xa8, 0x6b, 0xbe, 0xef, 0x62,
- 0xab, 0x95, 0x78, 0x95, 0x54, 0x27, 0x16, 0xb2, 0xdb, 0xa7, 0x72, 0xfd, 0x8f, 0xa2, 0xc4, 0x8c,
- 0x33, 0x97, 0x5a, 0xb3, 0xb6, 0x0c, 0x4c, 0xaa, 0x22, 0x6e, 0x8a, 0x75, 0x67, 0x1e, 0x2c, 0xba,
- 0x11, 0x24, 0x55, 0xf1, 0xca, 0xd7, 0x8b, 0xc1, 0xce, 0x4a, 0x6a, 0xc3, 0x0f, 0xe8, 0x64, 0x7a,
- 0xb6, 0x58, 0x6a, 0xb1, 0xbb, 0xf3, 0x60, 0x11, 0x44, 0xf4, 0xcc, 0x9e, 0xc1, 0xd4, 0x24, 0xfa,
- 0x6d, 0xfc, 0x43, 0x8d, 0x35, 0xc6, 0xd4, 0xe8, 0x7b, 0xf4, 0xca, 0xc4, 0xc2, 0xdf, 0x59, 0xf4,
- 0xb5, 0xed, 0xf6, 0x43, 0x18, 0x92, 0x1f, 0x79, 0x7c, 0xe0, 0x92, 0xb5, 0x00, 0x19, 0x0f, 0xe1,
- 0xce, 0x8f, 0x89, 0x8e, 0x85, 0x4c, 0xb2, 0xa2, 0xcc, 0x63, 0xcf, 0x3e, 0xce, 0xe7, 0xc1, 0x62,
- 0x10, 0xed, 0xff, 0x98, 0xe8, 0xa5, 0xb3, 0x34, 0x83, 0xfb, 0x04, 0xc6, 0x15, 0x96, 0xe4, 0x4b,
- 0xfc, 0xb8, 0x4f, 0xe1, 0x8f, 0x3c, 0x46, 0x1c, 0xf9, 0xd8, 0x36, 0xa0, 0x12, 0x45, 0x9a, 0xc4,
- 0x45, 0x99, 0xe1, 0x15, 0x7f, 0x30, 0x0f, 0x16, 0xbd, 0xe3, 0xce, 0xa7, 0x9f, 0xd9, 0x26, 0x90,
- 0xe1, 0xd4, 0xe2, 0x6c, 0x0e, 0x83, 0xcb, 0xa2, 0x2c, 0xf4, 0x0a, 0x33, 0xfe, 0xd0, 0x1e, 0x78,
- 0xbc, 0x63, 0x54, 0x8d, 0x51, 0x8b, 0xda, 0xd0, 0x53, 0x21, 0x4b, 0x8c, 0xdf, 0xe2, 0x35, 0xff,
- 0x3d, 0x09, 0xc0, 0x80, 0x80, 0x6f, 0xf1, 0x9a, 0x3d, 0x83, 0x1d, 0x52, 0xab, 0x47, 0xa4, 0x56,
- 0x6c, 0x7b, 0x3a, 0x48, 0xa6, 0xc8, 0xce, 0xfe, 0x08, 0x33, 0xfb, 0xaf, 0xe3, 0xa2, 0x4c, 0xe5,
- 0xba, 0x12, 0x68, 0x90, 0x7f, 0x48, 0xf9, 0x4d, 0x09, 0x3f, 0x6d, 0x61, 0xf6, 0x09, 0x30, 0x3b,
- 0xed, 0x6e, 0x9b, 0x58, 0xa1, 0xc0, 0x44, 0x23, 0x7f, 0x46, 0x07, 0xcf, 0x92, 0xaa, 0x3a, 0x21,
- 0x43, 0xe4, 0x70, 0xdb, 0x49, 0xbc, 0x2a, 0x4c, 0xac, 0x30, 0xd1, 0xb2, 0xe4, 0x7f, 0xb0, 0x69,
- 0x46, 0x60, 0xa1, 0x88, 0x10, 0xf6, 0x05, 0xdc, 0xb3, 0xc5, 0x35, 0x2b, 0x25, 0x8d, 0x11, 0x98,
- 0xc5, 0x97, 0x52, 0xb9, 0xb2, 0x3d, 0xa6, 0xf3, 0x6d, 0xe9, 0x2f, 0x1a, 0xe3, 0xd7, 0x52, 0x51,
- 0xf9, 0xbe, 0x84, 0x07, 0x3f, 0x7f, 0xc9, 0xf7, 0x45, 0xf3, 0x39, 0xbd, 0xf8, 0xc1, 0xad, 0x17,
- 0x7d, 0x77, 0x34, 0xdd, 0x17, 0xed, 0x8b, 0x74, 0xd2, 0x13, 0x6a, 0xd0, 0xa4, 0x45, 0xe9, 0x8c,
- 0xc7, 0x30, 0xb2, 0x97, 0x1a, 0x2a, 0x47, 0x8a, 0x90, 0x12, 0x04, 0x07, 0x59, 0x5a, 0x84, 0x7f,
- 0x83, 0xd9, 0x52, 0xe6, 0xaf, 0x48, 0xc8, 0x9a, 0x81, 0xdb, 0xd2, 0xbc, 0xe0, 0x7d, 0x35, 0x2f,
- 0xd8, 0xd2, 0xbc, 0xf0, 0xbf, 0x3d, 0xd8, 0x5b, 0xca, 0x3c, 0xc2, 0x24, 0x6b, 0x28, 0xf5, 0x0b,
- 0x12, 0x7b, 0x7b, 0xa3, 0xee, 0xb6, 0x78, 0x7e, 0x05, 0x7b, 0x3e, 0x9a, 0x46, 0x23, 0xee, 0x10,
- 0x0f, 0x1e, 0x6e, 0xf3, 0x60, 0x2b, 0x85, 0x68, 0xb2, 0xde, 0xca, 0x68, 0x5b, 0x07, 0xbb, 0x54,
- 0xa9, 0x5f, 0xd0, 0xc1, 0x1d, 0x32, 0xb6, 0x3a, 0x78, 0xa3, 0xcd, 0xbd, 0xf7, 0xd0, 0xe6, 0x6d,
- 0xa1, 0xdf, 0x9d, 0x77, 0xb7, 0x85, 0xfe, 0x39, 0xec, 0xaf, 0x8b, 0xb2, 0x58, 0xd7, 0xeb, 0x98,
- 0xae, 0x60, 0xba, 0xb5, 0xfa, 0xc4, 0xa6, 0xa9, 0x37, 0x58, 0x46, 0xd3, 0xfd, 0xf5, 0x29, 0xb0,
- 0xa2, 0x4c, 0x45, 0x9d, 0xe1, 0x26, 0x9d, 0x07, 0x6e, 0x5c, 0xbd, 0x65, 0x83, 0xd0, 0x07, 0xd0,
- 0x4b, 0x65, 0x5d, 0x1a, 0x3e, 0xa4, 0xf8, 0xdd, 0xc2, 0xd2, 0xbc, 0x91, 0x23, 0x3a, 0x51, 0x61,
- 0x8e, 0x57, 0x7c, 0x8f, 0x7a, 0x35, 0x6b, 0x2c, 0xd4, 0xa5, 0x1c, 0xaf, 0x6c, 0xf4, 0x56, 0x83,
- 0xbc, 0x97, 0x53, 0xcb, 0xa1, 0x45, 0x9c, 0xf9, 0xe9, 0xed, 0x71, 0x9f, 0x51, 0xe4, 0xdb, 0xa3,
- 0xbe, 0x80, 0x59, 0x13, 0xb6, 0xed, 0x35, 0x7d, 0x23, 0x00, 0x05, 0xbd, 0xe7, 0x71, 0xf7, 0x75,
- 0xa1, 0xd9, 0x11, 0x1c, 0x34, 0x1e, 0x71, 0x85, 0x2d, 0xf3, 0xf9, 0x3e, 0xed, 0xba, 0x9f, 0x38,
- 0xb7, 0xbf, 0xa2, 0xda, 0x50, 0xa4, 0x66, 0x6b, 0x92, 0xcd, 0x11, 0x6d, 0x3b, 0xf2, 0xd8, 0x37,
- 0x56, 0x29, 0x1f, 0xc3, 0xa8, 0x3d, 0x5d, 0x08, 0x3e, 0x26, 0x0f, 0x68, 0x0e, 0x16, 0xc2, 0x8e,
- 0x4d, 0x9a, 0xa4, 0x2b, 0x8c, 0x0b, 0x83, 0x2a, 0x31, 0x52, 0xf1, 0x09, 0xf9, 0x4c, 0x08, 0x3d,
- 0xf5, 0xa0, 0xad, 0x44, 0x59, 0xaf, 0x63, 0xbd, 0x4a, 0x54, 0xa6, 0x39, 0xa3, 0x88, 0x86, 0x65,
- 0xbd, 0x3e, 0x27, 0x20, 0xfc, 0x57, 0x40, 0xdf, 0x83, 0x8e, 0xdb, 0xee, 0xb2, 0x61, 0x1f, 0x43,
- 0x57, 0xc8, 0x9c, 0x07, 0xc4, 0xcd, 0xbb, 0x1b, 0x2c, 0xb9, 0xf9, 0xc6, 0x88, 0xac, 0xc7, 0x06,
- 0xa3, 0x3a, 0xef, 0xc1, 0xa8, 0x10, 0x26, 0x22, 0xd1, 0x26, 0x6e, 0xf9, 0xe9, 0xc8, 0x3b, 0xb2,
- 0xe0, 0x89, 0xe3, 0x68, 0xf8, 0x9f, 0x80, 0x46, 0xed, 0x8d, 0xfd, 0xac, 0x89, 0x30, 0x95, 0xea,
- 0xf6, 0x4c, 0x05, 0xb7, 0x86, 0xf3, 0xd6, 0x3c, 0x74, 0x5c, 0x7e, 0xff, 0x7f, 0x1e, 0xba, 0x64,
- 0x6c, 0xe7, 0xa1, 0xe5, 0xd9, 0xce, 0x26, 0xcf, 0x1e, 0x01, 0x18, 0x69, 0x12, 0xe1, 0xee, 0xe1,
- 0x9e, 0x9b, 0x2f, 0x42, 0xe8, 0x12, 0xe6, 0xd0, 0x57, 0x14, 0x97, 0xe6, 0xbb, 0x6e, 0x3b, 0xbf,
- 0x0c, 0xff, 0xdd, 0xa1, 0x4a, 0xfa, 0xd0, 0x7f, 0x8b, 0x4c, 0xfc, 0x7c, 0xc4, 0x7b, 0xbf, 0x36,
- 0xe2, 0xbd, 0xcd, 0x11, 0x9f, 0xd9, 0xcf, 0x11, 0x51, 0x1b, 0xbb, 0xf7, 0x4a, 0xd6, 0x4a, 0x53,
- 0x0a, 0x93, 0xe3, 0xe0, 0xb3, 0x68, 0x7a, 0x63, 0xfa, 0xc6, 0x5a, 0xec, 0x25, 0xe3, 0x07, 0xa7,
- 0xd1, 0x23, 0x97, 0xd4, 0x20, 0x9a, 0x7a, 0xdc, 0x8b, 0x0e, 0x7d, 0xa0, 0xd4, 0x36, 0xb1, 0x56,
- 0xb8, 0xdc, 0xa8, 0x8f, 0x09, 0x6c, 0xa4, 0xe9, 0x29, 0x4c, 0x9a, 0x7d, 0x62, 0x59, 0x8a, 0x6b,
- 0x3f, 0xe2, 0xe3, 0x06, 0x3c, 0x2b, 0xc5, 0x75, 0x78, 0x45, 0x2a, 0xed, 0xab, 0xe4, 0x09, 0x77,
- 0x04, 0x3d, 0xda, 0xc8, 0x53, 0xee, 0xfe, 0x36, 0x8d, 0x36, 0xc8, 0x10, 0x39, 0x3f, 0xf6, 0x05,
- 0xf4, 0x75, 0xbd, 0x5e, 0x27, 0xea, 0xda, 0x33, 0xef, 0x57, 0x5e, 0x69, 0x3c, 0xbf, 0xea, 0xfd,
- 0xdd, 0x92, 0xf6, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x70, 0xd9, 0xa0, 0xf8, 0x48, 0x0d, 0x00,
- 0x00,
-}
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto
deleted file mode 100644
index 8981dc475..000000000
--- a/vendor/google.golang.org/appengine/internal/log/log_service.proto
+++ /dev/null
@@ -1,150 +0,0 @@
-syntax = "proto2";
-option go_package = "log";
-
-package appengine;
-
-message LogServiceError {
- enum ErrorCode {
- OK = 0;
- INVALID_REQUEST = 1;
- STORAGE_ERROR = 2;
- }
-}
-
-message UserAppLogLine {
- required int64 timestamp_usec = 1;
- required int64 level = 2;
- required string message = 3;
-}
-
-message UserAppLogGroup {
- repeated UserAppLogLine log_line = 2;
-}
-
-message FlushRequest {
- optional bytes logs = 1;
-}
-
-message SetStatusRequest {
- required string status = 1;
-}
-
-
-message LogOffset {
- optional bytes request_id = 1;
-}
-
-message LogLine {
- required int64 time = 1;
- required int32 level = 2;
- required string log_message = 3;
-}
-
-message RequestLog {
- required string app_id = 1;
- optional string module_id = 37 [default="default"];
- required string version_id = 2;
- required bytes request_id = 3;
- optional LogOffset offset = 35;
- required string ip = 4;
- optional string nickname = 5;
- required int64 start_time = 6;
- required int64 end_time = 7;
- required int64 latency = 8;
- required int64 mcycles = 9;
- required string method = 10;
- required string resource = 11;
- required string http_version = 12;
- required int32 status = 13;
- required int64 response_size = 14;
- optional string referrer = 15;
- optional string user_agent = 16;
- required string url_map_entry = 17;
- required string combined = 18;
- optional int64 api_mcycles = 19;
- optional string host = 20;
- optional double cost = 21;
-
- optional string task_queue_name = 22;
- optional string task_name = 23;
-
- optional bool was_loading_request = 24;
- optional int64 pending_time = 25;
- optional int32 replica_index = 26 [default = -1];
- optional bool finished = 27 [default = true];
- optional bytes clone_key = 28;
-
- repeated LogLine line = 29;
-
- optional bool lines_incomplete = 36;
- optional bytes app_engine_release = 38;
-
- optional int32 exit_reason = 30;
- optional bool was_throttled_for_time = 31;
- optional bool was_throttled_for_requests = 32;
- optional int64 throttled_time = 33;
-
- optional bytes server_name = 34;
-}
-
-message LogModuleVersion {
- optional string module_id = 1 [default="default"];
- optional string version_id = 2;
-}
-
-message LogReadRequest {
- required string app_id = 1;
- repeated string version_id = 2;
- repeated LogModuleVersion module_version = 19;
-
- optional int64 start_time = 3;
- optional int64 end_time = 4;
- optional LogOffset offset = 5;
- repeated bytes request_id = 6;
-
- optional int32 minimum_log_level = 7;
- optional bool include_incomplete = 8;
- optional int64 count = 9;
-
- optional string combined_log_regex = 14;
- optional string host_regex = 15;
- optional int32 replica_index = 16;
-
- optional bool include_app_logs = 10;
- optional int32 app_logs_per_request = 17;
- optional bool include_host = 11;
- optional bool include_all = 12;
- optional bool cache_iterator = 13;
- optional int32 num_shards = 18;
-}
-
-message LogReadResponse {
- repeated RequestLog log = 1;
- optional LogOffset offset = 2;
- optional int64 last_end_time = 3;
-}
-
-message LogUsageRecord {
- optional string version_id = 1;
- optional int32 start_time = 2;
- optional int32 end_time = 3;
- optional int64 count = 4;
- optional int64 total_size = 5;
- optional int32 records = 6;
-}
-
-message LogUsageRequest {
- required string app_id = 1;
- repeated string version_id = 2;
- optional int32 start_time = 3;
- optional int32 end_time = 4;
- optional uint32 resolution_hours = 5 [default = 1];
- optional bool combine_versions = 6;
- optional int32 usage_version = 7;
- optional bool versions_only = 8;
-}
-
-message LogUsageResponse {
- repeated LogUsageRecord usage = 1;
- optional LogUsageRecord summary = 2;
-}
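For readers skimming the removed vendor code: the proto2 messages above are what produce the nil-guarded getters and the Default_* constants in the deleted log_service.pb.go earlier in this diff. A minimal hand-written sketch of that accessor pattern, not taken from the removed files and using illustrative names only:

package main

import "fmt"

// Illustrative analogue of the generated proto2 accessors removed above:
// optional fields are pointers, getters guard against a nil receiver and a
// nil field, and fall back to the declared default.
type requestLog struct {
	ModuleId *string // optional string module_id = 37 [default="default"]
	Finished *bool   // optional bool finished = 27 [default = true]
}

func (m *requestLog) GetModuleId() string {
	if m != nil && m.ModuleId != nil {
		return *m.ModuleId
	}
	return "default"
}

func (m *requestLog) GetFinished() bool {
	if m != nil && m.Finished != nil {
		return *m.Finished
	}
	return true
}

func main() {
	var missing *requestLog
	fmt.Println(missing.GetModuleId(), missing.GetFinished()) // default true

	id := "api"
	fmt.Println((&requestLog{ModuleId: &id}).GetModuleId()) // api
}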
diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go
deleted file mode 100644
index 1e765312f..000000000
--- a/vendor/google.golang.org/appengine/internal/main.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package internal
-
-import (
- "appengine_internal"
-)
-
-func Main() {
- MainPath = ""
- appengine_internal.Main()
-}
diff --git a/vendor/google.golang.org/appengine/internal/main_common.go b/vendor/google.golang.org/appengine/internal/main_common.go
deleted file mode 100644
index 357dce4dd..000000000
--- a/vendor/google.golang.org/appengine/internal/main_common.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package internal
-
-// MainPath stores the file path of the main package. On App Engine Standard
-// using Go version 1.9 and below, this will be unset. On App Engine Flex and
-// App Engine Standard second-gen (Go 1.11 and above), this will be the
-// filepath to package main.
-var MainPath string
diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go
deleted file mode 100644
index ddb79a333..000000000
--- a/vendor/google.golang.org/appengine/internal/main_vm.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-
-package internal
-
-import (
- "io"
- "log"
- "net/http"
- "net/url"
- "os"
- "path/filepath"
- "runtime"
-)
-
-func Main() {
- MainPath = filepath.Dir(findMainPath())
- installHealthChecker(http.DefaultServeMux)
-
- port := "8080"
- if s := os.Getenv("PORT"); s != "" {
- port = s
- }
-
- host := ""
- if IsDevAppServer() {
- host = "127.0.0.1"
- }
- if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil {
- log.Fatalf("http.ListenAndServe: %v", err)
- }
-}
-
-// Find the path to package main by looking at the root Caller.
-func findMainPath() string {
- pc := make([]uintptr, 100)
- n := runtime.Callers(2, pc)
- frames := runtime.CallersFrames(pc[:n])
- for {
- frame, more := frames.Next()
- // Tests won't have package main, instead they have testing.tRunner
- if frame.Function == "main.main" || frame.Function == "testing.tRunner" {
- return frame.File
- }
- if !more {
- break
- }
- }
- return ""
-}
-
-func installHealthChecker(mux *http.ServeMux) {
- // If no health check handler has been installed by this point, add a trivial one.
- const healthPath = "/_ah/health"
- hreq := &http.Request{
- Method: "GET",
- URL: &url.URL{
- Path: healthPath,
- },
- }
- if _, pat := mux.Handler(hreq); pat != healthPath {
- mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, "ok")
- })
- }
-}
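The installHealthChecker helper above probes the mux with a synthetic request to decide whether a handler is already mounted before registering its trivial fallback. A standalone, hedged sketch of that probe-then-register idiom using only the standard library; ensureHandler is an illustrative name, not an App Engine API:

package main

import (
	"io"
	"net/http"
	"net/url"
)

// ensureHandler registers fallback at path only when nothing is already
// mounted there, by asking the mux which pattern a synthetic request
// for that path would match.
func ensureHandler(mux *http.ServeMux, path string, fallback http.HandlerFunc) {
	probe := &http.Request{Method: "GET", URL: &url.URL{Path: path}}
	if _, pattern := mux.Handler(probe); pattern != path {
		mux.HandleFunc(path, fallback)
	}
}

func main() {
	mux := http.NewServeMux()
	ensureHandler(mux, "/_ah/health", func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "ok")
	})
	_ = http.ListenAndServe("127.0.0.1:8080", mux)
}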
diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go
deleted file mode 100644
index c4ba63bb4..000000000
--- a/vendor/google.golang.org/appengine/internal/metadata.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-// This file has code for accessing metadata.
-//
-// References:
-// https://cloud.google.com/compute/docs/metadata
-
-import (
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
-)
-
-const (
- metadataHost = "metadata"
- metadataPath = "/computeMetadata/v1/"
-)
-
-var (
- metadataRequestHeaders = http.Header{
- "Metadata-Flavor": []string{"Google"},
- }
-)
-
-// TODO(dsymonds): Do we need to support default values, like Python?
-func mustGetMetadata(key string) []byte {
- b, err := getMetadata(key)
- if err != nil {
- panic(fmt.Sprintf("Metadata fetch failed for '%s': %v", key, err))
- }
- return b
-}
-
-func getMetadata(key string) ([]byte, error) {
- // TODO(dsymonds): May need to use url.Parse to support keys with query args.
- req := &http.Request{
- Method: "GET",
- URL: &url.URL{
- Scheme: "http",
- Host: metadataHost,
- Path: metadataPath + key,
- },
- Header: metadataRequestHeaders,
- Host: metadataHost,
- }
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
- if resp.StatusCode != 200 {
- return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
- }
- return ioutil.ReadAll(resp.Body)
-}
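The deleted metadata.go builds a GET against the GCE metadata host with the mandatory Metadata-Flavor: Google header and rejects any non-200 response. A minimal standalone sketch of the same request shape, assuming the standard metadata endpoint; fetchMetadata is an illustrative name:

package main

import (
	"fmt"
	"io"
	"net/http"
)

// fetchMetadata mirrors the request shape of the deleted getMetadata: a GET
// against the metadata host with the required Metadata-Flavor header, and an
// error for any non-200 status.
func fetchMetadata(key string) ([]byte, error) {
	req, err := http.NewRequest("GET", "http://metadata/computeMetadata/v1/"+key, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Metadata-Flavor", "Google")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
	}
	return io.ReadAll(resp.Body)
}

func main() {
	if zone, err := fetchMetadata("instance/zone"); err == nil {
		fmt.Printf("%s\n", zone)
	}
}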
diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go
deleted file mode 100644
index fe429720e..000000000
--- a/vendor/google.golang.org/appengine/internal/net.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-// This file implements a network dialer that limits the number of concurrent connections.
-// It is only used for API calls.
-
-import (
- "log"
- "net"
- "runtime"
- "sync"
- "time"
-)
-
-var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.
-
-func limitRelease() {
- // non-blocking
- select {
- case <-limitSem:
- default:
- // This should not normally happen.
- log.Print("appengine: unbalanced limitSem release!")
- }
-}
-
-func limitDial(network, addr string) (net.Conn, error) {
- limitSem <- 1
-
- // Dial with a timeout in case the API host is MIA.
- // The connection should normally be very fast.
- conn, err := net.DialTimeout(network, addr, 10*time.Second)
- if err != nil {
- limitRelease()
- return nil, err
- }
- lc := &limitConn{Conn: conn}
- runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
- return lc, nil
-}
-
-type limitConn struct {
- close sync.Once
- net.Conn
-}
-
-func (lc *limitConn) Close() error {
- defer lc.close.Do(func() {
- limitRelease()
- runtime.SetFinalizer(lc, nil)
- })
- return lc.Conn.Close()
-}
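The deleted net.go caps concurrent API connections with a buffered channel used as a counting semaphore, releasing the slot when the wrapped connection closes. A hedged standalone sketch of that idiom; limitedDial and limitedConn are illustrative names, and the cap of 100 simply mirrors the hard-coded value above:

package main

import (
	"net"
	"sync"
	"time"
)

// sem is a counting semaphore: its capacity caps concurrent connections,
// playing the same role as limitSem in the deleted file.
var sem = make(chan struct{}, 100)

type limitedConn struct {
	net.Conn
	once sync.Once
}

// Close releases the semaphore slot exactly once, then closes the connection.
func (c *limitedConn) Close() error {
	c.once.Do(func() { <-sem })
	return c.Conn.Close()
}

// limitedDial blocks until a slot is free, dials with a timeout, and hands
// the slot's release over to the returned connection's Close.
func limitedDial(network, addr string) (net.Conn, error) {
	sem <- struct{}{}
	conn, err := net.DialTimeout(network, addr, 10*time.Second)
	if err != nil {
		<-sem // give the slot back on failure
		return nil, err
	}
	return &limitedConn{Conn: conn}, nil
}

func main() {
	if conn, err := limitedDial("tcp", "example.com:80"); err == nil {
		conn.Close()
	}
}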
diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh
deleted file mode 100644
index 2fdb546a6..000000000
--- a/vendor/google.golang.org/appengine/internal/regen.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash -e
-#
-# This script rebuilds the generated code for the protocol buffers.
-# To run this you will need protoc and goprotobuf installed;
-# see https://github.com/golang/protobuf for instructions.
-
-PKG=google.golang.org/appengine
-
-function die() {
- echo 1>&2 $*
- exit 1
-}
-
-# Sanity check that the right tools are accessible.
-for tool in go protoc protoc-gen-go; do
- q=$(which $tool) || die "didn't find $tool"
- echo 1>&2 "$tool: $q"
-done
-
-echo -n 1>&2 "finding package dir... "
-pkgdir=$(go list -f '{{.Dir}}' $PKG)
-echo 1>&2 $pkgdir
-base=$(echo $pkgdir | sed "s,/$PKG\$,,")
-echo 1>&2 "base: $base"
-cd $base
-
-# Run protoc once per package.
-for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do
- echo 1>&2 "* $dir"
- protoc --go_out=. $dir/*.proto
-done
-
-for f in $(find $PKG/internal -name '*.pb.go'); do
- # Remove proto.RegisterEnum calls.
- # These cause duplicate registration panics when these packages
- # are used on classic App Engine. proto.RegisterEnum only affects
- # parsing the text format; we don't care about that.
- # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
- sed -i '/proto.RegisterEnum/d' $f
-done
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
deleted file mode 100644
index 8d782a38e..000000000
--- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
+++ /dev/null
@@ -1,361 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
-
-package remote_api
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type RpcError_ErrorCode int32
-
-const (
- RpcError_UNKNOWN RpcError_ErrorCode = 0
- RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1
- RpcError_PARSE_ERROR RpcError_ErrorCode = 2
- RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3
- RpcError_OVER_QUOTA RpcError_ErrorCode = 4
- RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5
- RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6
- RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7
- RpcError_BAD_REQUEST RpcError_ErrorCode = 8
- RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9
- RpcError_CANCELLED RpcError_ErrorCode = 10
- RpcError_REPLAY_ERROR RpcError_ErrorCode = 11
- RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12
-)
-
-var RpcError_ErrorCode_name = map[int32]string{
- 0: "UNKNOWN",
- 1: "CALL_NOT_FOUND",
- 2: "PARSE_ERROR",
- 3: "SECURITY_VIOLATION",
- 4: "OVER_QUOTA",
- 5: "REQUEST_TOO_LARGE",
- 6: "CAPABILITY_DISABLED",
- 7: "FEATURE_DISABLED",
- 8: "BAD_REQUEST",
- 9: "RESPONSE_TOO_LARGE",
- 10: "CANCELLED",
- 11: "REPLAY_ERROR",
- 12: "DEADLINE_EXCEEDED",
-}
-var RpcError_ErrorCode_value = map[string]int32{
- "UNKNOWN": 0,
- "CALL_NOT_FOUND": 1,
- "PARSE_ERROR": 2,
- "SECURITY_VIOLATION": 3,
- "OVER_QUOTA": 4,
- "REQUEST_TOO_LARGE": 5,
- "CAPABILITY_DISABLED": 6,
- "FEATURE_DISABLED": 7,
- "BAD_REQUEST": 8,
- "RESPONSE_TOO_LARGE": 9,
- "CANCELLED": 10,
- "REPLAY_ERROR": 11,
- "DEADLINE_EXCEEDED": 12,
-}
-
-func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {
- p := new(RpcError_ErrorCode)
- *p = x
- return p
-}
-func (x RpcError_ErrorCode) String() string {
- return proto.EnumName(RpcError_ErrorCode_name, int32(x))
-}
-func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode")
- if err != nil {
- return err
- }
- *x = RpcError_ErrorCode(value)
- return nil
-}
-func (RpcError_ErrorCode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_remote_api_1978114ec33a273d, []int{2, 0}
-}
-
-type Request struct {
- ServiceName *string `protobuf:"bytes,2,req,name=service_name,json=serviceName" json:"service_name,omitempty"`
- Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
- Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
- RequestId *string `protobuf:"bytes,5,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Request) Reset() { *m = Request{} }
-func (m *Request) String() string { return proto.CompactTextString(m) }
-func (*Request) ProtoMessage() {}
-func (*Request) Descriptor() ([]byte, []int) {
- return fileDescriptor_remote_api_1978114ec33a273d, []int{0}
-}
-func (m *Request) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Request.Unmarshal(m, b)
-}
-func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Request.Marshal(b, m, deterministic)
-}
-func (dst *Request) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Request.Merge(dst, src)
-}
-func (m *Request) XXX_Size() int {
- return xxx_messageInfo_Request.Size(m)
-}
-func (m *Request) XXX_DiscardUnknown() {
- xxx_messageInfo_Request.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Request proto.InternalMessageInfo
-
-func (m *Request) GetServiceName() string {
- if m != nil && m.ServiceName != nil {
- return *m.ServiceName
- }
- return ""
-}
-
-func (m *Request) GetMethod() string {
- if m != nil && m.Method != nil {
- return *m.Method
- }
- return ""
-}
-
-func (m *Request) GetRequest() []byte {
- if m != nil {
- return m.Request
- }
- return nil
-}
-
-func (m *Request) GetRequestId() string {
- if m != nil && m.RequestId != nil {
- return *m.RequestId
- }
- return ""
-}
-
-type ApplicationError struct {
- Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
- Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ApplicationError) Reset() { *m = ApplicationError{} }
-func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
-func (*ApplicationError) ProtoMessage() {}
-func (*ApplicationError) Descriptor() ([]byte, []int) {
- return fileDescriptor_remote_api_1978114ec33a273d, []int{1}
-}
-func (m *ApplicationError) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ApplicationError.Unmarshal(m, b)
-}
-func (m *ApplicationError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ApplicationError.Marshal(b, m, deterministic)
-}
-func (dst *ApplicationError) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ApplicationError.Merge(dst, src)
-}
-func (m *ApplicationError) XXX_Size() int {
- return xxx_messageInfo_ApplicationError.Size(m)
-}
-func (m *ApplicationError) XXX_DiscardUnknown() {
- xxx_messageInfo_ApplicationError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ApplicationError proto.InternalMessageInfo
-
-func (m *ApplicationError) GetCode() int32 {
- if m != nil && m.Code != nil {
- return *m.Code
- }
- return 0
-}
-
-func (m *ApplicationError) GetDetail() string {
- if m != nil && m.Detail != nil {
- return *m.Detail
- }
- return ""
-}
-
-type RpcError struct {
- Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
- Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RpcError) Reset() { *m = RpcError{} }
-func (m *RpcError) String() string { return proto.CompactTextString(m) }
-func (*RpcError) ProtoMessage() {}
-func (*RpcError) Descriptor() ([]byte, []int) {
- return fileDescriptor_remote_api_1978114ec33a273d, []int{2}
-}
-func (m *RpcError) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_RpcError.Unmarshal(m, b)
-}
-func (m *RpcError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_RpcError.Marshal(b, m, deterministic)
-}
-func (dst *RpcError) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RpcError.Merge(dst, src)
-}
-func (m *RpcError) XXX_Size() int {
- return xxx_messageInfo_RpcError.Size(m)
-}
-func (m *RpcError) XXX_DiscardUnknown() {
- xxx_messageInfo_RpcError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RpcError proto.InternalMessageInfo
-
-func (m *RpcError) GetCode() int32 {
- if m != nil && m.Code != nil {
- return *m.Code
- }
- return 0
-}
-
-func (m *RpcError) GetDetail() string {
- if m != nil && m.Detail != nil {
- return *m.Detail
- }
- return ""
-}
-
-type Response struct {
- Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
- Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
- ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error,json=applicationError" json:"application_error,omitempty"`
- JavaException []byte `protobuf:"bytes,4,opt,name=java_exception,json=javaException" json:"java_exception,omitempty"`
- RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error,json=rpcError" json:"rpc_error,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Response) Reset() { *m = Response{} }
-func (m *Response) String() string { return proto.CompactTextString(m) }
-func (*Response) ProtoMessage() {}
-func (*Response) Descriptor() ([]byte, []int) {
- return fileDescriptor_remote_api_1978114ec33a273d, []int{3}
-}
-func (m *Response) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Response.Unmarshal(m, b)
-}
-func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Response.Marshal(b, m, deterministic)
-}
-func (dst *Response) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Response.Merge(dst, src)
-}
-func (m *Response) XXX_Size() int {
- return xxx_messageInfo_Response.Size(m)
-}
-func (m *Response) XXX_DiscardUnknown() {
- xxx_messageInfo_Response.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Response proto.InternalMessageInfo
-
-func (m *Response) GetResponse() []byte {
- if m != nil {
- return m.Response
- }
- return nil
-}
-
-func (m *Response) GetException() []byte {
- if m != nil {
- return m.Exception
- }
- return nil
-}
-
-func (m *Response) GetApplicationError() *ApplicationError {
- if m != nil {
- return m.ApplicationError
- }
- return nil
-}
-
-func (m *Response) GetJavaException() []byte {
- if m != nil {
- return m.JavaException
- }
- return nil
-}
-
-func (m *Response) GetRpcError() *RpcError {
- if m != nil {
- return m.RpcError
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*Request)(nil), "remote_api.Request")
- proto.RegisterType((*ApplicationError)(nil), "remote_api.ApplicationError")
- proto.RegisterType((*RpcError)(nil), "remote_api.RpcError")
- proto.RegisterType((*Response)(nil), "remote_api.Response")
-}
-
-func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/remote_api/remote_api.proto", fileDescriptor_remote_api_1978114ec33a273d)
-}
-
-var fileDescriptor_remote_api_1978114ec33a273d = []byte{
- // 531 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x51, 0x6e, 0xd3, 0x40,
- 0x10, 0x86, 0xb1, 0x9b, 0x34, 0xf1, 0xc4, 0x2d, 0xdb, 0xa5, 0x14, 0x0b, 0x15, 0x29, 0x44, 0x42,
- 0xca, 0x53, 0x2a, 0x38, 0x00, 0x62, 0x63, 0x6f, 0x91, 0x85, 0x65, 0xa7, 0x6b, 0xbb, 0x50, 0x5e,
- 0x56, 0x2b, 0x67, 0x65, 0x8c, 0x12, 0xaf, 0xd9, 0x98, 0x8a, 0x17, 0x6e, 0xc0, 0xb5, 0x38, 0x0c,
- 0xb7, 0x40, 0x36, 0x6e, 0x63, 0xf5, 0x89, 0xb7, 0x7f, 0x7e, 0x7b, 0xe6, 0x1b, 0xcd, 0xcc, 0xc2,
- 0xbb, 0x5c, 0xa9, 0x7c, 0x23, 0x17, 0xb9, 0xda, 0x88, 0x32, 0x5f, 0x28, 0x9d, 0x5f, 0x88, 0xaa,
- 0x92, 0x65, 0x5e, 0x94, 0xf2, 0xa2, 0x28, 0x6b, 0xa9, 0x4b, 0xb1, 0xb9, 0xd0, 0x72, 0xab, 0x6a,
- 0xc9, 0x45, 0x55, 0xf4, 0xe4, 0xa2, 0xd2, 0xaa, 0x56, 0x18, 0xf6, 0xce, 0xec, 0x27, 0x8c, 0x98,
- 0xfc, 0xf6, 0x5d, 0xee, 0x6a, 0xfc, 0x12, 0xec, 0x9d, 0xd4, 0xb7, 0x45, 0x26, 0x79, 0x29, 0xb6,
- 0xd2, 0x31, 0xa7, 0xe6, 0xdc, 0x62, 0x93, 0xce, 0x0b, 0xc5, 0x56, 0xe2, 0x33, 0x38, 0xdc, 0xca,
- 0xfa, 0x8b, 0x5a, 0x3b, 0x07, 0xed, 0xc7, 0x2e, 0xc2, 0x0e, 0x8c, 0xf4, 0xbf, 0x2a, 0xce, 0x60,
- 0x6a, 0xce, 0x6d, 0x76, 0x17, 0xe2, 0x17, 0x00, 0x9d, 0xe4, 0xc5, 0xda, 0x19, 0x4e, 0x8d, 0xb9,
- 0xc5, 0xac, 0xce, 0xf1, 0xd7, 0xb3, 0xb7, 0x80, 0x48, 0x55, 0x6d, 0x8a, 0x4c, 0xd4, 0x85, 0x2a,
- 0xa9, 0xd6, 0x4a, 0x63, 0x0c, 0x83, 0x4c, 0xad, 0xa5, 0x63, 0x4c, 0xcd, 0xf9, 0x90, 0xb5, 0xba,
- 0x01, 0xaf, 0x65, 0x2d, 0x8a, 0x4d, 0xd7, 0x55, 0x17, 0xcd, 0x7e, 0x9b, 0x30, 0x66, 0x55, 0xf6,
- 0x7f, 0x89, 0x46, 0x2f, 0xf1, 0x97, 0x09, 0x56, 0x9b, 0xe5, 0x36, 0x7f, 0x4d, 0x60, 0x94, 0x86,
- 0x1f, 0xc2, 0xe8, 0x63, 0x88, 0x1e, 0x61, 0x0c, 0xc7, 0x2e, 0x09, 0x02, 0x1e, 0x46, 0x09, 0xbf,
- 0x8c, 0xd2, 0xd0, 0x43, 0x06, 0x7e, 0x0c, 0x93, 0x15, 0x61, 0x31, 0xe5, 0x94, 0xb1, 0x88, 0x21,
- 0x13, 0x9f, 0x01, 0x8e, 0xa9, 0x9b, 0x32, 0x3f, 0xb9, 0xe1, 0xd7, 0x7e, 0x14, 0x90, 0xc4, 0x8f,
- 0x42, 0x74, 0x80, 0x8f, 0x01, 0xa2, 0x6b, 0xca, 0xf8, 0x55, 0x1a, 0x25, 0x04, 0x0d, 0xf0, 0x53,
- 0x38, 0x61, 0xf4, 0x2a, 0xa5, 0x71, 0xc2, 0x93, 0x28, 0xe2, 0x01, 0x61, 0xef, 0x29, 0x1a, 0xe2,
- 0x67, 0xf0, 0xc4, 0x25, 0x2b, 0xb2, 0xf4, 0x83, 0xa6, 0x80, 0xe7, 0xc7, 0x64, 0x19, 0x50, 0x0f,
- 0x1d, 0xe2, 0x53, 0x40, 0x97, 0x94, 0x24, 0x29, 0xa3, 0x7b, 0x77, 0xd4, 0xe0, 0x97, 0xc4, 0xe3,
- 0x5d, 0x25, 0x34, 0x6e, 0xf0, 0x8c, 0xc6, 0xab, 0x28, 0x8c, 0x69, 0xaf, 0xae, 0x85, 0x8f, 0xc0,
- 0x72, 0x49, 0xe8, 0xd2, 0xa0, 0xc9, 0x03, 0x8c, 0xc0, 0x66, 0x74, 0x15, 0x90, 0x9b, 0xae, 0xef,
- 0x49, 0xd3, 0x8f, 0x47, 0x89, 0x17, 0xf8, 0x21, 0xe5, 0xf4, 0x93, 0x4b, 0xa9, 0x47, 0x3d, 0x64,
- 0xcf, 0xfe, 0x18, 0x30, 0x66, 0x72, 0x57, 0xa9, 0x72, 0x27, 0xf1, 0x73, 0x18, 0xeb, 0x4e, 0x3b,
- 0xc6, 0xd4, 0x98, 0xdb, 0xec, 0x3e, 0xc6, 0xe7, 0x60, 0xc9, 0x1f, 0x99, 0xac, 0x9a, 0x75, 0xb5,
- 0x23, 0xb5, 0xd9, 0xde, 0xc0, 0x3e, 0x9c, 0x88, 0xfd, 0x3a, 0xb9, 0x6c, 0x06, 0xec, 0x1c, 0x4c,
- 0x8d, 0xf9, 0xe4, 0xcd, 0xf9, 0xa2, 0x77, 0x87, 0x0f, 0x77, 0xce, 0x90, 0x78, 0x78, 0x05, 0xaf,
- 0xe0, 0xf8, 0xab, 0xb8, 0x15, 0x7c, 0x4f, 0x1b, 0xb4, 0xb4, 0xa3, 0xc6, 0xa5, 0xf7, 0xc4, 0xd7,
- 0x60, 0xe9, 0x2a, 0xeb, 0x48, 0xc3, 0x96, 0x74, 0xda, 0x27, 0xdd, 0x1d, 0x07, 0x1b, 0xeb, 0x4e,
- 0x2d, 0xed, 0xcf, 0xbd, 0x07, 0xf0, 0x37, 0x00, 0x00, 0xff, 0xff, 0x38, 0xd1, 0x0f, 0x22, 0x4f,
- 0x03, 0x00, 0x00,
-}
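
The generated remote_api types deleted above are plain proto2 messages: every RPC to an App Engine backend is wrapped in a Request and answered with a Response. The following standalone sketch is hypothetical and not part of this change; it only assumes the vendored packages being removed here are still importable, and shows how the generated accessors and the golang/protobuf runtime fit together.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "google.golang.org/appengine/internal/remote_api"
)

func main() {
	// Required proto2 fields are pointers; the proto.String helper fills in string fields.
	req := &pb.Request{
		ServiceName: proto.String("datastore_v3"),
		Method:      proto.String("BeginTransaction"),
		Request:     []byte{}, // the serialized service-specific payload would go here
	}

	wire, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}

	var decoded pb.Request
	if err := proto.Unmarshal(wire, &decoded); err != nil {
		panic(err)
	}
	// Generated getters are nil-safe and return zero values for unset fields.
	fmt.Println(decoded.GetServiceName(), decoded.GetMethod(), decoded.GetRequestId())
}
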
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
deleted file mode 100644
index f21763a4e..000000000
--- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
+++ /dev/null
@@ -1,44 +0,0 @@
-syntax = "proto2";
-option go_package = "remote_api";
-
-package remote_api;
-
-message Request {
- required string service_name = 2;
- required string method = 3;
- required bytes request = 4;
- optional string request_id = 5;
-}
-
-message ApplicationError {
- required int32 code = 1;
- required string detail = 2;
-}
-
-message RpcError {
- enum ErrorCode {
- UNKNOWN = 0;
- CALL_NOT_FOUND = 1;
- PARSE_ERROR = 2;
- SECURITY_VIOLATION = 3;
- OVER_QUOTA = 4;
- REQUEST_TOO_LARGE = 5;
- CAPABILITY_DISABLED = 6;
- FEATURE_DISABLED = 7;
- BAD_REQUEST = 8;
- RESPONSE_TOO_LARGE = 9;
- CANCELLED = 10;
- REPLAY_ERROR = 11;
- DEADLINE_EXCEEDED = 12;
- }
- required int32 code = 1;
- optional string detail = 2;
-}
-
-message Response {
- optional bytes response = 1;
- optional bytes exception = 2;
- optional ApplicationError application_error = 3;
- optional bytes java_exception = 4;
- optional RpcError rpc_error = 5;
-}
diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go
deleted file mode 100644
index 9006ae653..000000000
--- a/vendor/google.golang.org/appengine/internal/transaction.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-// This file implements hooks for applying datastore transactions.
-
-import (
- "errors"
- "reflect"
-
- "github.com/golang/protobuf/proto"
- netcontext "golang.org/x/net/context"
-
- basepb "google.golang.org/appengine/internal/base"
- pb "google.golang.org/appengine/internal/datastore"
-)
-
-var transactionSetters = make(map[reflect.Type]reflect.Value)
-
-// RegisterTransactionSetter registers a function that sets transaction information
-// in a protocol buffer message. f should be a function with two arguments,
-// the first being a protocol buffer type, and the second being *datastore.Transaction.
-func RegisterTransactionSetter(f interface{}) {
- v := reflect.ValueOf(f)
- transactionSetters[v.Type().In(0)] = v
-}
-
-// applyTransaction applies the transaction t to message pb
-// by using the relevant setter passed to RegisterTransactionSetter.
-func applyTransaction(pb proto.Message, t *pb.Transaction) {
- v := reflect.ValueOf(pb)
- if f, ok := transactionSetters[v.Type()]; ok {
- f.Call([]reflect.Value{v, reflect.ValueOf(t)})
- }
-}
-
-var transactionKey = "used for *Transaction"
-
-func transactionFromContext(ctx netcontext.Context) *transaction {
- t, _ := ctx.Value(&transactionKey).(*transaction)
- return t
-}
-
-func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
- return netcontext.WithValue(ctx, &transactionKey, t)
-}
-
-type transaction struct {
- transaction pb.Transaction
- finished bool
-}
-
-var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
-
-func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) {
- if transactionFromContext(c) != nil {
- return nil, errors.New("nested transactions are not supported")
- }
-
- // Begin the transaction.
- t := &transaction{}
- req := &pb.BeginTransactionRequest{
- App: proto.String(FullyQualifiedAppID(c)),
- }
- if xg {
- req.AllowMultipleEg = proto.Bool(true)
- }
- if previousTransaction != nil {
- req.PreviousTransaction = previousTransaction
- }
- if readOnly {
- req.Mode = pb.BeginTransactionRequest_READ_ONLY.Enum()
- } else {
- req.Mode = pb.BeginTransactionRequest_READ_WRITE.Enum()
- }
- if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
- return nil, err
- }
-
- // Call f, rolling back the transaction if f returns a non-nil error, or panics.
- // The panic is not recovered.
- defer func() {
- if t.finished {
- return
- }
- t.finished = true
- // Ignore the error return value, since we are already returning a non-nil
- // error (or we're panicking).
- Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
- }()
- if err := f(withTransaction(c, t)); err != nil {
- return &t.transaction, err
- }
- t.finished = true
-
- // Commit the transaction.
- res := &pb.CommitResponse{}
- err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
- if ae, ok := err.(*APIError); ok {
- /* TODO: restore this conditional
- if appengine.IsDevAppServer() {
- */
- // The Python Dev AppServer raises an ApplicationError with error code 2 (which is
- // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
- if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
- return &t.transaction, ErrConcurrentTransaction
- }
- if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
- return &t.transaction, ErrConcurrentTransaction
- }
- }
- return &t.transaction, err
-}
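
RunTransactionOnce above begins a datastore_v3 transaction, runs the callback with the transaction attached to the context, and then commits, rolling back if the callback returns an error or panics. A hypothetical caller (not part of this change, and assuming the vendored internal package is importable) might look like the sketch below.

package example

import (
	netcontext "golang.org/x/net/context"

	"google.golang.org/appengine/internal"
)

// runOnce is a hypothetical wrapper around a single transaction attempt.
// It uses a read-write, non-cross-group transaction with no previous transaction.
func runOnce(ctx netcontext.Context, op func(netcontext.Context) error) error {
	tx, err := internal.RunTransactionOnce(ctx, op, false /* xg */, false /* readOnly */, nil)
	if err == internal.ErrConcurrentTransaction {
		// Callers would normally retry here, optionally passing tx as the
		// previousTransaction argument on the next attempt.
		_ = tx
	}
	return err
}
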
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
deleted file mode 100644
index 5f727750a..000000000
--- a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
+++ /dev/null
@@ -1,527 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
-
-package urlfetch
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type URLFetchServiceError_ErrorCode int32
-
-const (
- URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0
- URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1
- URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2
- URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3
- URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4
- URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5
- URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6
- URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7
- URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8
- URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9
- URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10
- URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11
- URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12
-)
-
-var URLFetchServiceError_ErrorCode_name = map[int32]string{
- 0: "OK",
- 1: "INVALID_URL",
- 2: "FETCH_ERROR",
- 3: "UNSPECIFIED_ERROR",
- 4: "RESPONSE_TOO_LARGE",
- 5: "DEADLINE_EXCEEDED",
- 6: "SSL_CERTIFICATE_ERROR",
- 7: "DNS_ERROR",
- 8: "CLOSED",
- 9: "INTERNAL_TRANSIENT_ERROR",
- 10: "TOO_MANY_REDIRECTS",
- 11: "MALFORMED_REPLY",
- 12: "CONNECTION_ERROR",
-}
-var URLFetchServiceError_ErrorCode_value = map[string]int32{
- "OK": 0,
- "INVALID_URL": 1,
- "FETCH_ERROR": 2,
- "UNSPECIFIED_ERROR": 3,
- "RESPONSE_TOO_LARGE": 4,
- "DEADLINE_EXCEEDED": 5,
- "SSL_CERTIFICATE_ERROR": 6,
- "DNS_ERROR": 7,
- "CLOSED": 8,
- "INTERNAL_TRANSIENT_ERROR": 9,
- "TOO_MANY_REDIRECTS": 10,
- "MALFORMED_REPLY": 11,
- "CONNECTION_ERROR": 12,
-}
-
-func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode {
- p := new(URLFetchServiceError_ErrorCode)
- *p = x
- return p
-}
-func (x URLFetchServiceError_ErrorCode) String() string {
- return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x))
-}
-func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode")
- if err != nil {
- return err
- }
- *x = URLFetchServiceError_ErrorCode(value)
- return nil
-}
-func (URLFetchServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0, 0}
-}
-
-type URLFetchRequest_RequestMethod int32
-
-const (
- URLFetchRequest_GET URLFetchRequest_RequestMethod = 1
- URLFetchRequest_POST URLFetchRequest_RequestMethod = 2
- URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3
- URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4
- URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5
- URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6
-)
-
-var URLFetchRequest_RequestMethod_name = map[int32]string{
- 1: "GET",
- 2: "POST",
- 3: "HEAD",
- 4: "PUT",
- 5: "DELETE",
- 6: "PATCH",
-}
-var URLFetchRequest_RequestMethod_value = map[string]int32{
- "GET": 1,
- "POST": 2,
- "HEAD": 3,
- "PUT": 4,
- "DELETE": 5,
- "PATCH": 6,
-}
-
-func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod {
- p := new(URLFetchRequest_RequestMethod)
- *p = x
- return p
-}
-func (x URLFetchRequest_RequestMethod) String() string {
- return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x))
-}
-func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod")
- if err != nil {
- return err
- }
- *x = URLFetchRequest_RequestMethod(value)
- return nil
-}
-func (URLFetchRequest_RequestMethod) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0}
-}
-
-type URLFetchServiceError struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} }
-func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }
-func (*URLFetchServiceError) ProtoMessage() {}
-func (*URLFetchServiceError) Descriptor() ([]byte, []int) {
- return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0}
-}
-func (m *URLFetchServiceError) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_URLFetchServiceError.Unmarshal(m, b)
-}
-func (m *URLFetchServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_URLFetchServiceError.Marshal(b, m, deterministic)
-}
-func (dst *URLFetchServiceError) XXX_Merge(src proto.Message) {
- xxx_messageInfo_URLFetchServiceError.Merge(dst, src)
-}
-func (m *URLFetchServiceError) XXX_Size() int {
- return xxx_messageInfo_URLFetchServiceError.Size(m)
-}
-func (m *URLFetchServiceError) XXX_DiscardUnknown() {
- xxx_messageInfo_URLFetchServiceError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_URLFetchServiceError proto.InternalMessageInfo
-
-type URLFetchRequest struct {
- Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"`
- Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"`
- Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"`
- Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"`
- FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"`
- Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"`
- MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} }
-func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }
-func (*URLFetchRequest) ProtoMessage() {}
-func (*URLFetchRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1}
-}
-func (m *URLFetchRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_URLFetchRequest.Unmarshal(m, b)
-}
-func (m *URLFetchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_URLFetchRequest.Marshal(b, m, deterministic)
-}
-func (dst *URLFetchRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_URLFetchRequest.Merge(dst, src)
-}
-func (m *URLFetchRequest) XXX_Size() int {
- return xxx_messageInfo_URLFetchRequest.Size(m)
-}
-func (m *URLFetchRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_URLFetchRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_URLFetchRequest proto.InternalMessageInfo
-
-const Default_URLFetchRequest_FollowRedirects bool = true
-const Default_URLFetchRequest_MustValidateServerCertificate bool = true
-
-func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod {
- if m != nil && m.Method != nil {
- return *m.Method
- }
- return URLFetchRequest_GET
-}
-
-func (m *URLFetchRequest) GetUrl() string {
- if m != nil && m.Url != nil {
- return *m.Url
- }
- return ""
-}
-
-func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *URLFetchRequest) GetPayload() []byte {
- if m != nil {
- return m.Payload
- }
- return nil
-}
-
-func (m *URLFetchRequest) GetFollowRedirects() bool {
- if m != nil && m.FollowRedirects != nil {
- return *m.FollowRedirects
- }
- return Default_URLFetchRequest_FollowRedirects
-}
-
-func (m *URLFetchRequest) GetDeadline() float64 {
- if m != nil && m.Deadline != nil {
- return *m.Deadline
- }
- return 0
-}
-
-func (m *URLFetchRequest) GetMustValidateServerCertificate() bool {
- if m != nil && m.MustValidateServerCertificate != nil {
- return *m.MustValidateServerCertificate
- }
- return Default_URLFetchRequest_MustValidateServerCertificate
-}
-
-type URLFetchRequest_Header struct {
- Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
- Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} }
-func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }
-func (*URLFetchRequest_Header) ProtoMessage() {}
-func (*URLFetchRequest_Header) Descriptor() ([]byte, []int) {
- return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0}
-}
-func (m *URLFetchRequest_Header) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_URLFetchRequest_Header.Unmarshal(m, b)
-}
-func (m *URLFetchRequest_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_URLFetchRequest_Header.Marshal(b, m, deterministic)
-}
-func (dst *URLFetchRequest_Header) XXX_Merge(src proto.Message) {
- xxx_messageInfo_URLFetchRequest_Header.Merge(dst, src)
-}
-func (m *URLFetchRequest_Header) XXX_Size() int {
- return xxx_messageInfo_URLFetchRequest_Header.Size(m)
-}
-func (m *URLFetchRequest_Header) XXX_DiscardUnknown() {
- xxx_messageInfo_URLFetchRequest_Header.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_URLFetchRequest_Header proto.InternalMessageInfo
-
-func (m *URLFetchRequest_Header) GetKey() string {
- if m != nil && m.Key != nil {
- return *m.Key
- }
- return ""
-}
-
-func (m *URLFetchRequest_Header) GetValue() string {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return ""
-}
-
-type URLFetchResponse struct {
- Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"`
- StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"`
- Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"`
- ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"`
- ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"`
- ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"`
- FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"`
- ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"`
- ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"`
- ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} }
-func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) }
-func (*URLFetchResponse) ProtoMessage() {}
-func (*URLFetchResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2}
-}
-func (m *URLFetchResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_URLFetchResponse.Unmarshal(m, b)
-}
-func (m *URLFetchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_URLFetchResponse.Marshal(b, m, deterministic)
-}
-func (dst *URLFetchResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_URLFetchResponse.Merge(dst, src)
-}
-func (m *URLFetchResponse) XXX_Size() int {
- return xxx_messageInfo_URLFetchResponse.Size(m)
-}
-func (m *URLFetchResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_URLFetchResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_URLFetchResponse proto.InternalMessageInfo
-
-const Default_URLFetchResponse_ContentWasTruncated bool = false
-const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0
-const Default_URLFetchResponse_ApiBytesSent int64 = 0
-const Default_URLFetchResponse_ApiBytesReceived int64 = 0
-
-func (m *URLFetchResponse) GetContent() []byte {
- if m != nil {
- return m.Content
- }
- return nil
-}
-
-func (m *URLFetchResponse) GetStatusCode() int32 {
- if m != nil && m.StatusCode != nil {
- return *m.StatusCode
- }
- return 0
-}
-
-func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *URLFetchResponse) GetContentWasTruncated() bool {
- if m != nil && m.ContentWasTruncated != nil {
- return *m.ContentWasTruncated
- }
- return Default_URLFetchResponse_ContentWasTruncated
-}
-
-func (m *URLFetchResponse) GetExternalBytesSent() int64 {
- if m != nil && m.ExternalBytesSent != nil {
- return *m.ExternalBytesSent
- }
- return 0
-}
-
-func (m *URLFetchResponse) GetExternalBytesReceived() int64 {
- if m != nil && m.ExternalBytesReceived != nil {
- return *m.ExternalBytesReceived
- }
- return 0
-}
-
-func (m *URLFetchResponse) GetFinalUrl() string {
- if m != nil && m.FinalUrl != nil {
- return *m.FinalUrl
- }
- return ""
-}
-
-func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 {
- if m != nil && m.ApiCpuMilliseconds != nil {
- return *m.ApiCpuMilliseconds
- }
- return Default_URLFetchResponse_ApiCpuMilliseconds
-}
-
-func (m *URLFetchResponse) GetApiBytesSent() int64 {
- if m != nil && m.ApiBytesSent != nil {
- return *m.ApiBytesSent
- }
- return Default_URLFetchResponse_ApiBytesSent
-}
-
-func (m *URLFetchResponse) GetApiBytesReceived() int64 {
- if m != nil && m.ApiBytesReceived != nil {
- return *m.ApiBytesReceived
- }
- return Default_URLFetchResponse_ApiBytesReceived
-}
-
-type URLFetchResponse_Header struct {
- Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
- Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} }
-func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }
-func (*URLFetchResponse_Header) ProtoMessage() {}
-func (*URLFetchResponse_Header) Descriptor() ([]byte, []int) {
- return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2, 0}
-}
-func (m *URLFetchResponse_Header) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_URLFetchResponse_Header.Unmarshal(m, b)
-}
-func (m *URLFetchResponse_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_URLFetchResponse_Header.Marshal(b, m, deterministic)
-}
-func (dst *URLFetchResponse_Header) XXX_Merge(src proto.Message) {
- xxx_messageInfo_URLFetchResponse_Header.Merge(dst, src)
-}
-func (m *URLFetchResponse_Header) XXX_Size() int {
- return xxx_messageInfo_URLFetchResponse_Header.Size(m)
-}
-func (m *URLFetchResponse_Header) XXX_DiscardUnknown() {
- xxx_messageInfo_URLFetchResponse_Header.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_URLFetchResponse_Header proto.InternalMessageInfo
-
-func (m *URLFetchResponse_Header) GetKey() string {
- if m != nil && m.Key != nil {
- return *m.Key
- }
- return ""
-}
-
-func (m *URLFetchResponse_Header) GetValue() string {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*URLFetchServiceError)(nil), "appengine.URLFetchServiceError")
- proto.RegisterType((*URLFetchRequest)(nil), "appengine.URLFetchRequest")
- proto.RegisterType((*URLFetchRequest_Header)(nil), "appengine.URLFetchRequest.Header")
- proto.RegisterType((*URLFetchResponse)(nil), "appengine.URLFetchResponse")
- proto.RegisterType((*URLFetchResponse_Header)(nil), "appengine.URLFetchResponse.Header")
-}
-
-func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto", fileDescriptor_urlfetch_service_b245a7065f33bced)
-}
-
-var fileDescriptor_urlfetch_service_b245a7065f33bced = []byte{
- // 770 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xe3, 0x54,
- 0x10, 0xc6, 0x76, 0x7e, 0xa7, 0x5d, 0x7a, 0x76, 0xb6, 0x45, 0x66, 0xb5, 0xa0, 0x10, 0x09, 0x29,
- 0x17, 0x90, 0x2e, 0x2b, 0x24, 0x44, 0xaf, 0x70, 0xed, 0x93, 0xad, 0xa9, 0x63, 0x47, 0xc7, 0x4e,
- 0x61, 0xb9, 0xb1, 0xac, 0x78, 0x9a, 0x5a, 0xb2, 0xec, 0x60, 0x9f, 0x2c, 0xf4, 0x35, 0x78, 0x0d,
- 0xde, 0x87, 0xa7, 0xe1, 0x02, 0x9d, 0xc4, 0xc9, 0x6e, 0xbb, 0xd1, 0x4a, 0x5c, 0x65, 0xe6, 0x9b,
- 0xef, 0xcc, 0x99, 0x7c, 0xdf, 0xf8, 0x80, 0xb3, 0x2c, 0xcb, 0x65, 0x4e, 0xe3, 0x65, 0x99, 0x27,
- 0xc5, 0x72, 0x5c, 0x56, 0xcb, 0xf3, 0x64, 0xb5, 0xa2, 0x62, 0x99, 0x15, 0x74, 0x9e, 0x15, 0x92,
- 0xaa, 0x22, 0xc9, 0xcf, 0xd7, 0x55, 0x7e, 0x4b, 0x72, 0x71, 0xb7, 0x0f, 0xe2, 0x9a, 0xaa, 0xb7,
- 0xd9, 0x82, 0xc6, 0xab, 0xaa, 0x94, 0x25, 0xf6, 0xf7, 0x67, 0x86, 0x7f, 0xeb, 0x70, 0x3a, 0x17,
- 0xde, 0x44, 0xb1, 0xc2, 0x2d, 0x89, 0x57, 0x55, 0x59, 0x0d, 0xff, 0xd2, 0xa1, 0xbf, 0x89, 0xec,
- 0x32, 0x25, 0xec, 0x80, 0x1e, 0x5c, 0xb3, 0x4f, 0xf0, 0x04, 0x8e, 0x5c, 0xff, 0xc6, 0xf2, 0x5c,
- 0x27, 0x9e, 0x0b, 0x8f, 0x69, 0x0a, 0x98, 0xf0, 0xc8, 0xbe, 0x8a, 0xb9, 0x10, 0x81, 0x60, 0x3a,
- 0x9e, 0xc1, 0xd3, 0xb9, 0x1f, 0xce, 0xb8, 0xed, 0x4e, 0x5c, 0xee, 0x34, 0xb0, 0x81, 0x9f, 0x01,
- 0x0a, 0x1e, 0xce, 0x02, 0x3f, 0xe4, 0x71, 0x14, 0x04, 0xb1, 0x67, 0x89, 0xd7, 0x9c, 0xb5, 0x14,
- 0xdd, 0xe1, 0x96, 0xe3, 0xb9, 0x3e, 0x8f, 0xf9, 0xaf, 0x36, 0xe7, 0x0e, 0x77, 0x58, 0x1b, 0x3f,
- 0x87, 0xb3, 0x30, 0xf4, 0x62, 0x9b, 0x8b, 0xc8, 0x9d, 0xb8, 0xb6, 0x15, 0xf1, 0xa6, 0x53, 0x07,
- 0x9f, 0x40, 0xdf, 0xf1, 0xc3, 0x26, 0xed, 0x22, 0x40, 0xc7, 0xf6, 0x82, 0x90, 0x3b, 0xac, 0x87,
- 0x2f, 0xc0, 0x74, 0xfd, 0x88, 0x0b, 0xdf, 0xf2, 0xe2, 0x48, 0x58, 0x7e, 0xe8, 0x72, 0x3f, 0x6a,
- 0x98, 0x7d, 0x35, 0x82, 0xba, 0x79, 0x6a, 0xf9, 0x6f, 0x62, 0xc1, 0x1d, 0x57, 0x70, 0x3b, 0x0a,
- 0x19, 0xe0, 0x33, 0x38, 0x99, 0x5a, 0xde, 0x24, 0x10, 0x53, 0xee, 0xc4, 0x82, 0xcf, 0xbc, 0x37,
- 0xec, 0x08, 0x4f, 0x81, 0xd9, 0x81, 0xef, 0x73, 0x3b, 0x72, 0x03, 0xbf, 0x69, 0x71, 0x3c, 0xfc,
- 0xc7, 0x80, 0x93, 0x9d, 0x5a, 0x82, 0x7e, 0x5f, 0x53, 0x2d, 0xf1, 0x27, 0xe8, 0x4c, 0x49, 0xde,
- 0x95, 0xa9, 0xa9, 0x0d, 0xf4, 0xd1, 0xa7, 0xaf, 0x46, 0xe3, 0xbd, 0xba, 0xe3, 0x47, 0xdc, 0x71,
- 0xf3, 0xbb, 0xe5, 0x8b, 0xe6, 0x1c, 0x32, 0x30, 0xe6, 0x55, 0x6e, 0xea, 0x03, 0x7d, 0xd4, 0x17,
- 0x2a, 0xc4, 0x1f, 0xa1, 0x73, 0x47, 0x49, 0x4a, 0x95, 0x69, 0x0c, 0x8c, 0x11, 0xbc, 0xfa, 0xea,
- 0x23, 0x3d, 0xaf, 0x36, 0x44, 0xd1, 0x1c, 0xc0, 0x17, 0xd0, 0x9d, 0x25, 0xf7, 0x79, 0x99, 0xa4,
- 0x66, 0x67, 0xa0, 0x8d, 0x8e, 0x2f, 0xf5, 0x9e, 0x26, 0x76, 0x10, 0x8e, 0xe1, 0x64, 0x52, 0xe6,
- 0x79, 0xf9, 0x87, 0xa0, 0x34, 0xab, 0x68, 0x21, 0x6b, 0xb3, 0x3b, 0xd0, 0x46, 0xbd, 0x8b, 0x96,
- 0xac, 0xd6, 0x24, 0x1e, 0x17, 0xf1, 0x39, 0xf4, 0x1c, 0x4a, 0xd2, 0x3c, 0x2b, 0xc8, 0xec, 0x0d,
- 0xb4, 0x91, 0x26, 0xf6, 0x39, 0xfe, 0x0c, 0x5f, 0x4c, 0xd7, 0xb5, 0xbc, 0x49, 0xf2, 0x2c, 0x4d,
- 0x24, 0xa9, 0xed, 0xa1, 0xca, 0xa6, 0x4a, 0x66, 0xb7, 0xd9, 0x22, 0x91, 0x64, 0xf6, 0xdf, 0xeb,
- 0xfc, 0x71, 0xea, 0xf3, 0x97, 0xd0, 0xd9, 0xfe, 0x0f, 0x25, 0xc6, 0x35, 0xdd, 0x9b, 0xad, 0xad,
- 0x18, 0xd7, 0x74, 0x8f, 0xa7, 0xd0, 0xbe, 0x49, 0xf2, 0x35, 0x99, 0xed, 0x0d, 0xb6, 0x4d, 0x86,
- 0x1e, 0x3c, 0x79, 0xa0, 0x26, 0x76, 0xc1, 0x78, 0xcd, 0x23, 0xa6, 0x61, 0x0f, 0x5a, 0xb3, 0x20,
- 0x8c, 0x98, 0xae, 0xa2, 0x2b, 0x6e, 0x39, 0xcc, 0x50, 0xc5, 0xd9, 0x3c, 0x62, 0x2d, 0xb5, 0x2e,
- 0x0e, 0xf7, 0x78, 0xc4, 0x59, 0x1b, 0xfb, 0xd0, 0x9e, 0x59, 0x91, 0x7d, 0xc5, 0x3a, 0xc3, 0x7f,
- 0x0d, 0x60, 0xef, 0x84, 0xad, 0x57, 0x65, 0x51, 0x13, 0x9a, 0xd0, 0xb5, 0xcb, 0x42, 0x52, 0x21,
- 0x4d, 0x4d, 0x49, 0x29, 0x76, 0x29, 0x7e, 0x09, 0x10, 0xca, 0x44, 0xae, 0x6b, 0xf5, 0x71, 0x6c,
- 0x8c, 0x6b, 0x8b, 0xf7, 0x10, 0xbc, 0x78, 0xe4, 0xdf, 0xf0, 0xa0, 0x7f, 0xdb, 0x6b, 0x1e, 0x1b,
- 0xf8, 0x03, 0x3c, 0x6b, 0xae, 0xf9, 0x25, 0xa9, 0xa3, 0x6a, 0x5d, 0x28, 0x81, 0xb6, 0x66, 0xf6,
- 0x2e, 0xda, 0xb7, 0x49, 0x5e, 0x93, 0x38, 0xc4, 0xc0, 0x6f, 0xe0, 0x29, 0xff, 0x73, 0xfb, 0x02,
- 0x5c, 0xde, 0x4b, 0xaa, 0x43, 0x35, 0xb8, 0x72, 0xd7, 0x10, 0x1f, 0x16, 0xf0, 0x7b, 0x38, 0x7b,
- 0x00, 0x0a, 0x5a, 0x50, 0xf6, 0x96, 0xd2, 0x8d, 0xcd, 0x86, 0x38, 0x5c, 0x54, 0xfb, 0x30, 0xc9,
- 0x8a, 0x24, 0x57, 0xfb, 0xaa, 0xec, 0xed, 0x8b, 0x7d, 0x8e, 0xdf, 0x01, 0x5a, 0xab, 0xcc, 0x5e,
- 0xad, 0xa7, 0x59, 0x9e, 0x67, 0x35, 0x2d, 0xca, 0x22, 0xad, 0x4d, 0x50, 0xed, 0x2e, 0xb4, 0x97,
- 0xe2, 0x40, 0x11, 0xbf, 0x86, 0x63, 0x6b, 0x95, 0xbd, 0x9b, 0xf6, 0x68, 0x47, 0x7e, 0x00, 0xe3,
- 0xb7, 0xc0, 0x76, 0xf9, 0x7e, 0xcc, 0xe3, 0x1d, 0xf5, 0x83, 0xd2, 0xff, 0x5f, 0xa6, 0x4b, 0xf8,
- 0xad, 0xb7, 0x7b, 0x2a, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x9f, 0x6d, 0x24, 0x63, 0x05,
- 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
deleted file mode 100644
index f695edf6a..000000000
--- a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+++ /dev/null
@@ -1,64 +0,0 @@
-syntax = "proto2";
-option go_package = "urlfetch";
-
-package appengine;
-
-message URLFetchServiceError {
- enum ErrorCode {
- OK = 0;
- INVALID_URL = 1;
- FETCH_ERROR = 2;
- UNSPECIFIED_ERROR = 3;
- RESPONSE_TOO_LARGE = 4;
- DEADLINE_EXCEEDED = 5;
- SSL_CERTIFICATE_ERROR = 6;
- DNS_ERROR = 7;
- CLOSED = 8;
- INTERNAL_TRANSIENT_ERROR = 9;
- TOO_MANY_REDIRECTS = 10;
- MALFORMED_REPLY = 11;
- CONNECTION_ERROR = 12;
- }
-}
-
-message URLFetchRequest {
- enum RequestMethod {
- GET = 1;
- POST = 2;
- HEAD = 3;
- PUT = 4;
- DELETE = 5;
- PATCH = 6;
- }
- required RequestMethod Method = 1;
- required string Url = 2;
- repeated group Header = 3 {
- required string Key = 4;
- required string Value = 5;
- }
- optional bytes Payload = 6 [ctype=CORD];
-
- optional bool FollowRedirects = 7 [default=true];
-
- optional double Deadline = 8;
-
- optional bool MustValidateServerCertificate = 9 [default=true];
-}
-
-message URLFetchResponse {
- optional bytes Content = 1;
- required int32 StatusCode = 2;
- repeated group Header = 3 {
- required string Key = 4;
- required string Value = 5;
- }
- optional bool ContentWasTruncated = 6 [default=false];
- optional int64 ExternalBytesSent = 7;
- optional int64 ExternalBytesReceived = 8;
-
- optional string FinalUrl = 9;
-
- optional int64 ApiCpuMilliseconds = 10 [default=0];
- optional int64 ApiBytesSent = 11 [default=0];
- optional int64 ApiBytesReceived = 12 [default=0];
-}
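
This proto is the source for the generated urlfetch_service.pb.go earlier in the diff: Method and Url are the only required request fields, and FollowRedirects and MustValidateServerCertificate default to true. A hypothetical construction of a request with the generated Go types (names taken from the vendored package being removed) could look like this:

package example

import (
	"github.com/golang/protobuf/proto"

	pb "google.golang.org/appengine/internal/urlfetch"
)

// newGetRequest builds a GET URLFetchRequest; a hypothetical helper for illustration only.
func newGetRequest(rawurl string) *pb.URLFetchRequest {
	method := pb.URLFetchRequest_GET
	return &pb.URLFetchRequest{
		Method:          &method,              // required enum field
		Url:             proto.String(rawurl), // required string field
		FollowRedirects: proto.Bool(false),    // override the proto default of true
	}
}
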
diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
deleted file mode 100644
index 6ffe1e6d9..000000000
--- a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// Package urlfetch provides an http.RoundTripper implementation
-// for fetching URLs via App Engine's urlfetch service.
-package urlfetch // import "google.golang.org/appengine/urlfetch"
-
-import (
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "github.com/golang/protobuf/proto"
- "golang.org/x/net/context"
-
- "google.golang.org/appengine/internal"
- pb "google.golang.org/appengine/internal/urlfetch"
-)
-
-// Transport is an implementation of http.RoundTripper for
-// App Engine. Users should generally create an http.Client using
-// this transport and use the Client rather than using this transport
-// directly.
-type Transport struct {
- Context context.Context
-
- // Controls whether the application checks the validity of SSL certificates
- // over HTTPS connections. A value of false (the default) instructs the
- // application to send a request to the server only if the certificate is
- // valid and signed by a trusted certificate authority (CA), and also
- // includes a hostname that matches the certificate. A value of true
- // instructs the application to perform no certificate validation.
- AllowInvalidServerCertificate bool
-}
-
-// Verify statically that *Transport implements http.RoundTripper.
-var _ http.RoundTripper = (*Transport)(nil)
-
-// Client returns an *http.Client using a default urlfetch Transport. This
-// client will have the default deadline of 5 seconds, and will check the
-// validity of SSL certificates.
-//
-// Any deadline of the provided context will be used for requests through this client;
-// if the context does not have a deadline, then a 5 second default is used.
-func Client(ctx context.Context) *http.Client {
- return &http.Client{
- Transport: &Transport{
- Context: ctx,
- },
- }
-}
-
-type bodyReader struct {
- content []byte
- truncated bool
- closed bool
-}
-
-// ErrTruncatedBody is the error returned after the final Read() from a
-// response's Body if the body has been truncated by App Engine's proxy.
-var ErrTruncatedBody = errors.New("urlfetch: truncated body")
-
-func statusCodeToText(code int) string {
- if t := http.StatusText(code); t != "" {
- return t
- }
- return strconv.Itoa(code)
-}
-
-func (br *bodyReader) Read(p []byte) (n int, err error) {
- if br.closed {
- if br.truncated {
- return 0, ErrTruncatedBody
- }
- return 0, io.EOF
- }
- n = copy(p, br.content)
- if n > 0 {
- br.content = br.content[n:]
- return
- }
- if br.truncated {
- br.closed = true
- return 0, ErrTruncatedBody
- }
- return 0, io.EOF
-}
-
-func (br *bodyReader) Close() error {
- br.closed = true
- br.content = nil
- return nil
-}
-
-// A map of the URL Fetch-accepted methods that take a request body.
-var methodAcceptsRequestBody = map[string]bool{
- "POST": true,
- "PUT": true,
- "PATCH": true,
-}
-
-// urlString returns a valid string given a URL. This function is necessary because
-// the String method of URL doesn't correctly handle URLs with non-empty Opaque values.
-// See http://code.google.com/p/go/issues/detail?id=4860.
-func urlString(u *url.URL) string {
- if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") {
- return u.String()
- }
- aux := *u
- aux.Opaque = "//" + aux.Host + aux.Opaque
- return aux.String()
-}
-
-// RoundTrip issues a single HTTP request and returns its response. Per the
-// http.RoundTripper interface, RoundTrip only returns an error if there
-// was an unsupported request or the URL Fetch proxy fails.
-// Note that HTTP response codes such as 5xx, 403, 404, etc. are not
-// errors as far as the transport is concerned and will be returned
-// with err set to nil.
-func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) {
- methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method]
- if !ok {
- return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method)
- }
-
- method := pb.URLFetchRequest_RequestMethod(methNum)
-
- freq := &pb.URLFetchRequest{
- Method: &method,
- Url: proto.String(urlString(req.URL)),
- FollowRedirects: proto.Bool(false), // http.Client's responsibility
- MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate),
- }
- if deadline, ok := t.Context.Deadline(); ok {
- freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds())
- }
-
- for k, vals := range req.Header {
- for _, val := range vals {
- freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{
- Key: proto.String(k),
- Value: proto.String(val),
- })
- }
- }
- if methodAcceptsRequestBody[req.Method] && req.Body != nil {
- // Avoid a []byte copy if req.Body has a Bytes method.
- switch b := req.Body.(type) {
- case interface {
- Bytes() []byte
- }:
- freq.Payload = b.Bytes()
- default:
- freq.Payload, err = ioutil.ReadAll(req.Body)
- if err != nil {
- return nil, err
- }
- }
- }
-
- fres := &pb.URLFetchResponse{}
- if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil {
- return nil, err
- }
-
- res = &http.Response{}
- res.StatusCode = int(*fres.StatusCode)
- res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode))
- res.Header = make(http.Header)
- res.Request = req
-
- // Faked:
- res.ProtoMajor = 1
- res.ProtoMinor = 1
- res.Proto = "HTTP/1.1"
- res.Close = true
-
- for _, h := range fres.Header {
- hkey := http.CanonicalHeaderKey(*h.Key)
- hval := *h.Value
- if hkey == "Content-Length" {
- // Will get filled in below for all but HEAD requests.
- if req.Method == "HEAD" {
- res.ContentLength, _ = strconv.ParseInt(hval, 10, 64)
- }
- continue
- }
- res.Header.Add(hkey, hval)
- }
-
- if req.Method != "HEAD" {
- res.ContentLength = int64(len(fres.Content))
- }
-
- truncated := fres.GetContentWasTruncated()
- res.Body = &bodyReader{content: fres.Content, truncated: truncated}
- return
-}
-
-func init() {
- internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name)
- internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED))
-}
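
For context, the usual entry point into the package deleted above is urlfetch.Client, which wraps the Transport in a standard *http.Client. A minimal, hypothetical usage sketch (assuming the package were still vendored) follows.

package example

import (
	"io/ioutil"

	"golang.org/x/net/context"

	"google.golang.org/appengine/urlfetch"
)

// fetch issues a GET through App Engine's urlfetch service; hypothetical helper.
func fetch(ctx context.Context, url string) ([]byte, error) {
	client := urlfetch.Client(ctx) // *http.Client backed by the urlfetch Transport
	resp, err := client.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return ioutil.ReadAll(resp.Body)
}
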
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS
deleted file mode 100644
index d18a17885..000000000
--- a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS
+++ /dev/null
@@ -1,23 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-reviewers:
-- thockin
-- lavalamp
-- smarterclayton
-- wojtek-t
-- deads2k
-- brendandburns
-- derekwaynecarr
-- caesarxuchao
-- mikedanese
-- liggitt
-- nikhiljindal
-- gmarek
-- erictune
-- saad-ali
-- janetkuo
-- tallclair
-- dims
-- hongchaodeng
-- krousey
-- cjcullen
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go
deleted file mode 100644
index 167baf680..000000000
--- a/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package errors provides detailed error types for api field validation.
-package errors // import "k8s.io/apimachinery/pkg/api/errors"
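
The errors.go file that follows defines the StatusError type and constructors such as NewNotFound that Kubernetes clients rely on. A short, hypothetical example of what that API looks like from a caller's side (not part of this change):

package example

import (
	"fmt"
	"net/http"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// notFoundExample shows the shape of a StatusError built by NewNotFound.
func notFoundExample() {
	gr := schema.GroupResource{Group: "", Resource: "pods"}
	err := apierrors.NewNotFound(gr, "mypod")

	fmt.Println(err.Error())                              // pods "mypod" not found
	fmt.Println(err.Status().Code == http.StatusNotFound) // true
	fmt.Println(err.Status().Reason)                      // NotFound
}
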
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
deleted file mode 100644
index d3927d817..000000000
--- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
+++ /dev/null
@@ -1,697 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package errors
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "reflect"
- "strings"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/util/validation/field"
-)
-
-// StatusError is an error intended for consumption by a REST API server; it can also be
-// reconstructed by clients from a REST response. Public to allow easy type switches.
-type StatusError struct {
- ErrStatus metav1.Status
-}
-
-// APIStatus is exposed by errors that can be converted to an api.Status object
-// for finer grained details.
-type APIStatus interface {
- Status() metav1.Status
-}
-
-var _ error = &StatusError{}
-
-// Error implements the Error interface.
-func (e *StatusError) Error() string {
- return e.ErrStatus.Message
-}
-
-// Status allows access to e's status without having to know the detailed workings
-// of StatusError.
-func (e *StatusError) Status() metav1.Status {
- return e.ErrStatus
-}
-
-// DebugError reports extended info about the error to debug output.
-func (e *StatusError) DebugError() (string, []interface{}) {
- if out, err := json.MarshalIndent(e.ErrStatus, "", " "); err == nil {
- return "server response object: %s", []interface{}{string(out)}
- }
- return "server response object: %#v", []interface{}{e.ErrStatus}
-}
-
-// HasStatusCause returns true if the provided error has a details cause
-// with the provided type name.
-func HasStatusCause(err error, name metav1.CauseType) bool {
- _, ok := StatusCause(err, name)
- return ok
-}
-
-// StatusCause returns the named cause from the provided error if it exists and
-// the error is of the type APIStatus. Otherwise it returns false.
-func StatusCause(err error, name metav1.CauseType) (metav1.StatusCause, bool) {
- apierr, ok := err.(APIStatus)
- if !ok || apierr == nil || apierr.Status().Details == nil {
- return metav1.StatusCause{}, false
- }
- for _, cause := range apierr.Status().Details.Causes {
- if cause.Type == name {
- return cause, true
- }
- }
- return metav1.StatusCause{}, false
-}
-
-// UnexpectedObjectError can be returned by FromObject if it's passed a non-status object.
-type UnexpectedObjectError struct {
- Object runtime.Object
-}
-
-// Error returns an error message describing 'u'.
-func (u *UnexpectedObjectError) Error() string {
- return fmt.Sprintf("unexpected object: %v", u.Object)
-}
-
-// FromObject generates a StatusError from a metav1.Status, if that is the type of obj; otherwise,
-// returns an UnexpectedObjectError.
-func FromObject(obj runtime.Object) error {
- switch t := obj.(type) {
- case *metav1.Status:
- return &StatusError{ErrStatus: *t}
- case runtime.Unstructured:
- var status metav1.Status
- obj := t.UnstructuredContent()
- if !reflect.DeepEqual(obj["kind"], "Status") {
- break
- }
- if err := runtime.DefaultUnstructuredConverter.FromUnstructured(t.UnstructuredContent(), &status); err != nil {
- return err
- }
- if status.APIVersion != "v1" && status.APIVersion != "meta.k8s.io/v1" {
- break
- }
- return &StatusError{ErrStatus: status}
- }
- return &UnexpectedObjectError{obj}
-}
-
-// NewNotFound returns a new error which indicates that the resource of the kind and the name was not found.
-func NewNotFound(qualifiedResource schema.GroupResource, name string) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusNotFound,
- Reason: metav1.StatusReasonNotFound,
- Details: &metav1.StatusDetails{
- Group: qualifiedResource.Group,
- Kind: qualifiedResource.Resource,
- Name: name,
- },
- Message: fmt.Sprintf("%s %q not found", qualifiedResource.String(), name),
- }}
-}
-
-// NewAlreadyExists returns an error indicating the item requested exists by that identifier.
-func NewAlreadyExists(qualifiedResource schema.GroupResource, name string) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusConflict,
- Reason: metav1.StatusReasonAlreadyExists,
- Details: &metav1.StatusDetails{
- Group: qualifiedResource.Group,
- Kind: qualifiedResource.Resource,
- Name: name,
- },
- Message: fmt.Sprintf("%s %q already exists", qualifiedResource.String(), name),
- }}
-}
-
-// NewUnauthorized returns an error indicating the client is not authorized to perform the requested
-// action.
-func NewUnauthorized(reason string) *StatusError {
- message := reason
- if len(message) == 0 {
- message = "not authorized"
- }
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusUnauthorized,
- Reason: metav1.StatusReasonUnauthorized,
- Message: message,
- }}
-}
-
-// NewForbidden returns an error indicating the requested action was forbidden
-func NewForbidden(qualifiedResource schema.GroupResource, name string, err error) *StatusError {
- var message string
- if qualifiedResource.Empty() {
- message = fmt.Sprintf("forbidden: %v", err)
- } else if name == "" {
- message = fmt.Sprintf("%s is forbidden: %v", qualifiedResource.String(), err)
- } else {
- message = fmt.Sprintf("%s %q is forbidden: %v", qualifiedResource.String(), name, err)
- }
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusForbidden,
- Reason: metav1.StatusReasonForbidden,
- Details: &metav1.StatusDetails{
- Group: qualifiedResource.Group,
- Kind: qualifiedResource.Resource,
- Name: name,
- },
- Message: message,
- }}
-}
-
-// NewConflict returns an error indicating the item can't be updated as provided.
-func NewConflict(qualifiedResource schema.GroupResource, name string, err error) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusConflict,
- Reason: metav1.StatusReasonConflict,
- Details: &metav1.StatusDetails{
- Group: qualifiedResource.Group,
- Kind: qualifiedResource.Resource,
- Name: name,
- },
- Message: fmt.Sprintf("Operation cannot be fulfilled on %s %q: %v", qualifiedResource.String(), name, err),
- }}
-}
-
-// NewApplyConflict returns an error including details on the request's apply conflicts.
-func NewApplyConflict(causes []metav1.StatusCause, message string) *StatusError {
- return &StatusError{ErrStatus: metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusConflict,
- Reason: metav1.StatusReasonConflict,
- Details: &metav1.StatusDetails{
- // TODO: Get obj details here?
- Causes: causes,
- },
- Message: message,
- }}
-}
-
-// NewGone returns an error indicating the item is no longer available at the server and no forwarding address is known.
-// DEPRECATED: Please use NewResourceExpired instead.
-func NewGone(message string) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusGone,
- Reason: metav1.StatusReasonGone,
- Message: message,
- }}
-}
-
-// NewResourceExpired creates an error that indicates that the requested resource content has expired from
-// the server (usually due to a resourceVersion that is too old).
-func NewResourceExpired(message string) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusGone,
- Reason: metav1.StatusReasonExpired,
- Message: message,
- }}
-}
-
-// NewInvalid returns an error indicating the item is invalid and cannot be processed.
-func NewInvalid(qualifiedKind schema.GroupKind, name string, errs field.ErrorList) *StatusError {
- causes := make([]metav1.StatusCause, 0, len(errs))
- for i := range errs {
- err := errs[i]
- causes = append(causes, metav1.StatusCause{
- Type: metav1.CauseType(err.Type),
- Message: err.ErrorBody(),
- Field: err.Field,
- })
- }
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusUnprocessableEntity,
- Reason: metav1.StatusReasonInvalid,
- Details: &metav1.StatusDetails{
- Group: qualifiedKind.Group,
- Kind: qualifiedKind.Kind,
- Name: name,
- Causes: causes,
- },
- Message: fmt.Sprintf("%s %q is invalid: %v", qualifiedKind.String(), name, errs.ToAggregate()),
- }}
-}
-
-// NewBadRequest creates an error that indicates that the request is invalid and can not be processed.
-func NewBadRequest(reason string) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusBadRequest,
- Reason: metav1.StatusReasonBadRequest,
- Message: reason,
- }}
-}
-
-// NewTooManyRequests creates an error that indicates that the client must try again later because
-// the specified endpoint is not accepting requests. More specific details should be provided
-// if the client should know why the failure was limited.
-func NewTooManyRequests(message string, retryAfterSeconds int) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusTooManyRequests,
- Reason: metav1.StatusReasonTooManyRequests,
- Message: message,
- Details: &metav1.StatusDetails{
- RetryAfterSeconds: int32(retryAfterSeconds),
- },
- }}
-}
-
-// NewServiceUnavailable creates an error that indicates that the requested service is unavailable.
-func NewServiceUnavailable(reason string) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusServiceUnavailable,
- Reason: metav1.StatusReasonServiceUnavailable,
- Message: reason,
- }}
-}
-
-// NewMethodNotSupported returns an error indicating the requested action is not supported on this kind.
-func NewMethodNotSupported(qualifiedResource schema.GroupResource, action string) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusMethodNotAllowed,
- Reason: metav1.StatusReasonMethodNotAllowed,
- Details: &metav1.StatusDetails{
- Group: qualifiedResource.Group,
- Kind: qualifiedResource.Resource,
- },
- Message: fmt.Sprintf("%s is not supported on resources of kind %q", action, qualifiedResource.String()),
- }}
-}
-
-// NewServerTimeout returns an error indicating the requested action could not be completed due to a
-// transient error, and the client should try again.
-func NewServerTimeout(qualifiedResource schema.GroupResource, operation string, retryAfterSeconds int) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusInternalServerError,
- Reason: metav1.StatusReasonServerTimeout,
- Details: &metav1.StatusDetails{
- Group: qualifiedResource.Group,
- Kind: qualifiedResource.Resource,
- Name: operation,
- RetryAfterSeconds: int32(retryAfterSeconds),
- },
- Message: fmt.Sprintf("The %s operation against %s could not be completed at this time, please try again.", operation, qualifiedResource.String()),
- }}
-}
-
-// NewServerTimeoutForKind should not exist. Server timeouts happen when accessing resources; the Kind is just what we
-// happened to be looking at when the request failed. This delegates to NewServerTimeout to keep the code sane, but we should work towards removing it.
-func NewServerTimeoutForKind(qualifiedKind schema.GroupKind, operation string, retryAfterSeconds int) *StatusError {
- return NewServerTimeout(schema.GroupResource{Group: qualifiedKind.Group, Resource: qualifiedKind.Kind}, operation, retryAfterSeconds)
-}
-
-// NewInternalError returns an error indicating that an internal server error occurred and the request cannot be processed.
-func NewInternalError(err error) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusInternalServerError,
- Reason: metav1.StatusReasonInternalError,
- Details: &metav1.StatusDetails{
- Causes: []metav1.StatusCause{{Message: err.Error()}},
- },
- Message: fmt.Sprintf("Internal error occurred: %v", err),
- }}
-}
-
-// NewTimeoutError returns an error indicating that a timeout occurred before the request
-// could be completed. Clients may retry, but the operation may still complete.
-func NewTimeoutError(message string, retryAfterSeconds int) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusGatewayTimeout,
- Reason: metav1.StatusReasonTimeout,
- Message: fmt.Sprintf("Timeout: %s", message),
- Details: &metav1.StatusDetails{
- RetryAfterSeconds: int32(retryAfterSeconds),
- },
- }}
-}
-
-// NewTooManyRequestsError returns an error indicating that the request was rejected because
-// the server has received too many requests. The client should wait and retry, but if the request
-// is perishable, the client should not retry it.
-func NewTooManyRequestsError(message string) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusTooManyRequests,
- Reason: metav1.StatusReasonTooManyRequests,
- Message: fmt.Sprintf("Too many requests: %s", message),
- }}
-}
-
-// NewRequestEntityTooLargeError returns an error indicating that the request
-// entity was too large.
-func NewRequestEntityTooLargeError(message string) *StatusError {
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusRequestEntityTooLarge,
- Reason: metav1.StatusReasonRequestEntityTooLarge,
- Message: fmt.Sprintf("Request entity too large: %s", message),
- }}
-}
-
-// NewGenericServerResponse returns a new error for server responses that are not in a recognizable form.
-func NewGenericServerResponse(code int, verb string, qualifiedResource schema.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError {
- reason := metav1.StatusReasonUnknown
- message := fmt.Sprintf("the server responded with the status code %d but did not return more information", code)
- switch code {
- case http.StatusConflict:
- if verb == "POST" {
- reason = metav1.StatusReasonAlreadyExists
- } else {
- reason = metav1.StatusReasonConflict
- }
- message = "the server reported a conflict"
- case http.StatusNotFound:
- reason = metav1.StatusReasonNotFound
- message = "the server could not find the requested resource"
- case http.StatusBadRequest:
- reason = metav1.StatusReasonBadRequest
- message = "the server rejected our request for an unknown reason"
- case http.StatusUnauthorized:
- reason = metav1.StatusReasonUnauthorized
- message = "the server has asked for the client to provide credentials"
- case http.StatusForbidden:
- reason = metav1.StatusReasonForbidden
- // the server message has details about who is trying to perform what action. Keep its message.
- message = serverMessage
- case http.StatusNotAcceptable:
- reason = metav1.StatusReasonNotAcceptable
- // the server message has details about what types are acceptable
- if len(serverMessage) == 0 || serverMessage == "unknown" {
- message = "the server was unable to respond with a content type that the client supports"
- } else {
- message = serverMessage
- }
- case http.StatusUnsupportedMediaType:
- reason = metav1.StatusReasonUnsupportedMediaType
- // the server message has details about what types are acceptable
- message = serverMessage
- case http.StatusMethodNotAllowed:
- reason = metav1.StatusReasonMethodNotAllowed
- message = "the server does not allow this method on the requested resource"
- case http.StatusUnprocessableEntity:
- reason = metav1.StatusReasonInvalid
- message = "the server rejected our request due to an error in our request"
- case http.StatusServiceUnavailable:
- reason = metav1.StatusReasonServiceUnavailable
- message = "the server is currently unable to handle the request"
- case http.StatusGatewayTimeout:
- reason = metav1.StatusReasonTimeout
- message = "the server was unable to return a response in the time allotted, but may still be processing the request"
- case http.StatusTooManyRequests:
- reason = metav1.StatusReasonTooManyRequests
- message = "the server has received too many requests and has asked us to try again later"
- default:
- if code >= 500 {
- reason = metav1.StatusReasonInternalError
- message = fmt.Sprintf("an error on the server (%q) has prevented the request from succeeding", serverMessage)
- }
- }
- switch {
- case !qualifiedResource.Empty() && len(name) > 0:
- message = fmt.Sprintf("%s (%s %s %s)", message, strings.ToLower(verb), qualifiedResource.String(), name)
- case !qualifiedResource.Empty():
- message = fmt.Sprintf("%s (%s %s)", message, strings.ToLower(verb), qualifiedResource.String())
- }
- var causes []metav1.StatusCause
- if isUnexpectedResponse {
- causes = []metav1.StatusCause{
- {
- Type: metav1.CauseTypeUnexpectedServerResponse,
- Message: serverMessage,
- },
- }
- } else {
- causes = nil
- }
- return &StatusError{metav1.Status{
- Status: metav1.StatusFailure,
- Code: int32(code),
- Reason: reason,
- Details: &metav1.StatusDetails{
- Group: qualifiedResource.Group,
- Kind: qualifiedResource.Resource,
- Name: name,
-
- Causes: causes,
- RetryAfterSeconds: int32(retryAfterSeconds),
- },
- Message: message,
- }}
-}
-
-// IsNotFound returns true if the specified error was created by NewNotFound.
-// It supports wrapped errors.
-func IsNotFound(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonNotFound
-}
-
-// IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists.
-// It supports wrapped errors.
-func IsAlreadyExists(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonAlreadyExists
-}
-
-// IsConflict determines if the err is an error which indicates the provided update conflicts.
-// It supports wrapped errors.
-func IsConflict(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonConflict
-}
-
-// IsInvalid determines if the err is an error which indicates the provided resource is not valid.
-// It supports wrapped errors.
-func IsInvalid(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonInvalid
-}
-
-// IsGone is true if the error indicates the requested resource is no longer available.
-// It supports wrapped errors.
-func IsGone(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonGone
-}
-
-// IsResourceExpired is true if the error indicates the resource has expired and the current action is
-// no longer possible.
-// It supports wrapped errors.
-func IsResourceExpired(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonExpired
-}
-
-// IsNotAcceptable determines if err is an error which indicates that the request failed due to an invalid Accept header
-// It supports wrapped errors.
-func IsNotAcceptable(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonNotAcceptable
-}
-
-// IsUnsupportedMediaType determines if err is an error which indicates that the request failed due to an invalid Content-Type header
-// It supports wrapped errors.
-func IsUnsupportedMediaType(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonUnsupportedMediaType
-}
-
-// IsMethodNotSupported determines if the err is an error which indicates the provided action could not
-// be performed because it is not supported by the server.
-// It supports wrapped errors.
-func IsMethodNotSupported(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonMethodNotAllowed
-}
-
-// IsServiceUnavailable is true if the error indicates the underlying service is no longer available.
-// It supports wrapped errors.
-func IsServiceUnavailable(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonServiceUnavailable
-}
-
-// IsBadRequest determines if err is an error which indicates that the request is invalid.
-// It supports wrapped errors.
-func IsBadRequest(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonBadRequest
-}
-
-// IsUnauthorized determines if err is an error which indicates that the request is unauthorized and
-// requires authentication by the user.
-// It supports wrapped errors.
-func IsUnauthorized(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonUnauthorized
-}
-
-// IsForbidden determines if err is an error which indicates that the request is forbidden and cannot
-// be completed as requested.
-// It supports wrapped errors.
-func IsForbidden(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonForbidden
-}
-
-// IsTimeout determines if err is an error which indicates that the request timed out due to long
-// processing.
-// It supports wrapped errors.
-func IsTimeout(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonTimeout
-}
-
-// IsServerTimeout determines if err is an error which indicates that the request needs to be retried
-// by the client.
-// It supports wrapped errors.
-func IsServerTimeout(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonServerTimeout
-}
-
-// IsInternalError determines if err is an error which indicates an internal server error.
-// It supports wrapped errors.
-func IsInternalError(err error) bool {
- return ReasonForError(err) == metav1.StatusReasonInternalError
-}
-
-// IsTooManyRequests determines if err is an error which indicates that there are too many requests
-// for the server to handle.
-// It supports wrapped errors.
-func IsTooManyRequests(err error) bool {
- if ReasonForError(err) == metav1.StatusReasonTooManyRequests {
- return true
- }
- if status := APIStatus(nil); errors.As(err, &status) {
- return status.Status().Code == http.StatusTooManyRequests
- }
- return false
-}
-
-// IsRequestEntityTooLargeError determines if err is an error which indicates
-// the request entity is too large.
-// It supports wrapped errors.
-func IsRequestEntityTooLargeError(err error) bool {
- if ReasonForError(err) == metav1.StatusReasonRequestEntityTooLarge {
- return true
- }
- if status := APIStatus(nil); errors.As(err, &status) {
- return status.Status().Code == http.StatusRequestEntityTooLarge
- }
- return false
-}
-
-// IsUnexpectedServerError returns true if the server response was not in the expected API format,
-// and may be the result of another HTTP actor.
-// It supports wrapped errors.
-func IsUnexpectedServerError(err error) bool {
- if status := APIStatus(nil); errors.As(err, &status) && status.Status().Details != nil {
- for _, cause := range status.Status().Details.Causes {
- if cause.Type == metav1.CauseTypeUnexpectedServerResponse {
- return true
- }
- }
- }
- return false
-}
-
-// IsUnexpectedObjectError determines if err is due to an unexpected object from the master.
-// It supports wrapped errors.
-func IsUnexpectedObjectError(err error) bool {
- uoe := &UnexpectedObjectError{}
- return err != nil && errors.As(err, &uoe)
-}
-
-// SuggestsClientDelay returns true if this error suggests a client delay as well as the
-// suggested seconds to wait, or false if the error does not imply a wait. It does not
-// address whether the error *should* be retried, since some errors (like a 3xx) may
-// request delay without retry.
-// It supports wrapped errors.
-func SuggestsClientDelay(err error) (int, bool) {
- if t := APIStatus(nil); errors.As(err, &t) && t.Status().Details != nil {
- switch t.Status().Reason {
- // this StatusReason explicitly requests the caller to delay the action
- case metav1.StatusReasonServerTimeout:
- return int(t.Status().Details.RetryAfterSeconds), true
- }
- // If the client requests that we retry after a certain number of seconds
- if t.Status().Details.RetryAfterSeconds > 0 {
- return int(t.Status().Details.RetryAfterSeconds), true
- }
- }
- return 0, false
-}
-
-// ReasonForError returns the status reason for a particular error.
-// It supports wrapped errors.
-func ReasonForError(err error) metav1.StatusReason {
- if status := APIStatus(nil); errors.As(err, &status) {
- return status.Status().Reason
- }
- return metav1.StatusReasonUnknown
-}
-
-// ErrorReporter converts generic errors into runtime.Object errors without
-// requiring the caller to take a dependency on meta/v1 (where Status lives).
-// This prevents circular dependencies in core watch code.
-type ErrorReporter struct {
- code int
- verb string
- reason string
-}
-
-// NewClientErrorReporter will respond with valid v1.Status objects that report
-// unexpected server responses. Primarily used by watch to report errors when
-// we attempt to decode a response from the server and it is not in the form
-// we expect. Because watch is a dependency of the core api, we can't return
-// meta/v1.Status in that package and so must inject this interface to convert a
-// generic error as appropriate. The reason is passed as a unique status cause
-// on the returned status, otherwise the generic "ClientError" is returned.
-func NewClientErrorReporter(code int, verb string, reason string) *ErrorReporter {
- return &ErrorReporter{
- code: code,
- verb: verb,
- reason: reason,
- }
-}
-
-// AsObject returns a valid error runtime.Object (a v1.Status) for the given
-// error, using the code and verb of the reporter type. The error is set to
-// indicate that this was an unexpected server response.
-func (r *ErrorReporter) AsObject(err error) runtime.Object {
- status := NewGenericServerResponse(r.code, r.verb, schema.GroupResource{}, "", err.Error(), 0, true)
- if status.ErrStatus.Details == nil {
- status.ErrStatus.Details = &metav1.StatusDetails{}
- }
- reason := r.reason
- if len(reason) == 0 {
- reason = "ClientError"
- }
- status.ErrStatus.Details.Causes = append(status.ErrStatus.Details.Causes, metav1.StatusCause{
- Type: metav1.CauseType(reason),
- Message: err.Error(),
- })
- return &status.ErrStatus
-}
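A minimal usage sketch (illustrative only, not part of the removed vendored file or of this diff): errors built by the constructors above carry a machine-readable reason that the Is* helpers and SuggestsClientDelay inspect; the apierrors import alias is an assumption.

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Illustrative sketch only; not part of the vendored code removed in this diff.
	err := apierrors.NewTooManyRequests("slow down", 5)

	fmt.Println(apierrors.IsTooManyRequests(err)) // true
	fmt.Println(apierrors.IsNotFound(err))        // false

	// StatusDetails carries a server-suggested delay where one was set.
	if seconds, ok := apierrors.SuggestsClientDelay(err); ok {
		fmt.Printf("retry after %d seconds\n", seconds) // retry after 5 seconds
	}

	// Resources are identified by schema types when constructing errors.
	_ = apierrors.NewMethodNotSupported(schema.GroupResource{Group: "apps", Resource: "deployments"}, "patch")
}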
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
deleted file mode 100644
index 7b101ea51..000000000
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
+++ /dev/null
@@ -1,500 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package unstructured
-
-import (
- gojson "encoding/json"
- "fmt"
- "io"
- "strings"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/util/json"
- "k8s.io/klog/v2"
-)
-
-// NestedFieldCopy returns a deep copy of the value of a nested field.
-// Returns false if the value is missing.
-// No error is returned for a nil field.
-//
-// Note: fields passed to this function are treated as keys within the passed
-// object; no array/slice syntax is supported.
-func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) {
- val, found, err := NestedFieldNoCopy(obj, fields...)
- if !found || err != nil {
- return nil, found, err
- }
- return runtime.DeepCopyJSONValue(val), true, nil
-}
-
-// NestedFieldNoCopy returns a reference to a nested field.
-// Returns false if value is not found and an error if unable
-// to traverse obj.
-//
-// Note: fields passed to this function are treated as keys within the passed
-// object; no array/slice syntax is supported.
-func NestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) {
- var val interface{} = obj
-
- for i, field := range fields {
- if val == nil {
- return nil, false, nil
- }
- if m, ok := val.(map[string]interface{}); ok {
- val, ok = m[field]
- if !ok {
- return nil, false, nil
- }
- } else {
- return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields[:i+1]), val, val)
- }
- }
- return val, true, nil
-}
-
-// NestedString returns the string value of a nested field.
-// Returns false if value is not found and an error if not a string.
-func NestedString(obj map[string]interface{}, fields ...string) (string, bool, error) {
- val, found, err := NestedFieldNoCopy(obj, fields...)
- if !found || err != nil {
- return "", found, err
- }
- s, ok := val.(string)
- if !ok {
- return "", false, fmt.Errorf("%v accessor error: %v is of the type %T, expected string", jsonPath(fields), val, val)
- }
- return s, true, nil
-}
-
-// NestedBool returns the bool value of a nested field.
-// Returns false if value is not found and an error if not a bool.
-func NestedBool(obj map[string]interface{}, fields ...string) (bool, bool, error) {
- val, found, err := NestedFieldNoCopy(obj, fields...)
- if !found || err != nil {
- return false, found, err
- }
- b, ok := val.(bool)
- if !ok {
- return false, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected bool", jsonPath(fields), val, val)
- }
- return b, true, nil
-}
-
-// NestedFloat64 returns the float64 value of a nested field.
-// Returns false if value is not found and an error if not a float64.
-func NestedFloat64(obj map[string]interface{}, fields ...string) (float64, bool, error) {
- val, found, err := NestedFieldNoCopy(obj, fields...)
- if !found || err != nil {
- return 0, found, err
- }
- f, ok := val.(float64)
- if !ok {
- return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected float64", jsonPath(fields), val, val)
- }
- return f, true, nil
-}
-
-// NestedInt64 returns the int64 value of a nested field.
-// Returns false if value is not found and an error if not an int64.
-func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool, error) {
- val, found, err := NestedFieldNoCopy(obj, fields...)
- if !found || err != nil {
- return 0, found, err
- }
- i, ok := val.(int64)
- if !ok {
- return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected int64", jsonPath(fields), val, val)
- }
- return i, true, nil
-}
-
-// NestedStringSlice returns a copy of []string value of a nested field.
-// Returns false if value is not found and an error if not a []interface{} or contains non-string items in the slice.
-func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, bool, error) {
- val, found, err := NestedFieldNoCopy(obj, fields...)
- if !found || err != nil {
- return nil, found, err
- }
- m, ok := val.([]interface{})
- if !ok {
- return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected []interface{}", jsonPath(fields), val, val)
- }
- strSlice := make([]string, 0, len(m))
- for _, v := range m {
- if str, ok := v.(string); ok {
- strSlice = append(strSlice, str)
- } else {
- return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the slice: %v is of the type %T, expected string", jsonPath(fields), v, v)
- }
- }
- return strSlice, true, nil
-}
-
-// NestedSlice returns a deep copy of []interface{} value of a nested field.
-// Returns false if value is not found and an error if not a []interface{}.
-func NestedSlice(obj map[string]interface{}, fields ...string) ([]interface{}, bool, error) {
- val, found, err := NestedFieldNoCopy(obj, fields...)
- if !found || err != nil {
- return nil, found, err
- }
- _, ok := val.([]interface{})
- if !ok {
- return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected []interface{}", jsonPath(fields), val, val)
- }
- return runtime.DeepCopyJSONValue(val).([]interface{}), true, nil
-}
-
-// NestedStringMap returns a copy of map[string]string value of a nested field.
-// Returns false if value is not found and an error if not a map[string]interface{} or contains non-string values in the map.
-func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool, error) {
- m, found, err := nestedMapNoCopy(obj, fields...)
- if !found || err != nil {
- return nil, found, err
- }
- strMap := make(map[string]string, len(m))
- for k, v := range m {
- if str, ok := v.(string); ok {
- strMap[k] = str
- } else {
- return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the map: %v is of the type %T, expected string", jsonPath(fields), v, v)
- }
- }
- return strMap, true, nil
-}
-
-// NestedMap returns a deep copy of map[string]interface{} value of a nested field.
-// Returns false if value is not found and an error if not a map[string]interface{}.
-func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) {
- m, found, err := nestedMapNoCopy(obj, fields...)
- if !found || err != nil {
- return nil, found, err
- }
- return runtime.DeepCopyJSON(m), true, nil
-}
-
-// nestedMapNoCopy returns a map[string]interface{} value of a nested field.
-// Returns false if value is not found and an error if not a map[string]interface{}.
-func nestedMapNoCopy(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) {
- val, found, err := NestedFieldNoCopy(obj, fields...)
- if !found || err != nil {
- return nil, found, err
- }
- m, ok := val.(map[string]interface{})
- if !ok {
- return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields), val, val)
- }
- return m, true, nil
-}
-
-// SetNestedField sets the value of a nested field to a deep copy of the value provided.
-// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
-func SetNestedField(obj map[string]interface{}, value interface{}, fields ...string) error {
- return setNestedFieldNoCopy(obj, runtime.DeepCopyJSONValue(value), fields...)
-}
-
-func setNestedFieldNoCopy(obj map[string]interface{}, value interface{}, fields ...string) error {
- m := obj
-
- for i, field := range fields[:len(fields)-1] {
- if val, ok := m[field]; ok {
- if valMap, ok := val.(map[string]interface{}); ok {
- m = valMap
- } else {
- return fmt.Errorf("value cannot be set because %v is not a map[string]interface{}", jsonPath(fields[:i+1]))
- }
- } else {
- newVal := make(map[string]interface{})
- m[field] = newVal
- m = newVal
- }
- }
- m[fields[len(fields)-1]] = value
- return nil
-}
-
-// SetNestedStringSlice sets the string slice value of a nested field.
-// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
-func SetNestedStringSlice(obj map[string]interface{}, value []string, fields ...string) error {
- m := make([]interface{}, 0, len(value)) // convert []string into []interface{}
- for _, v := range value {
- m = append(m, v)
- }
- return setNestedFieldNoCopy(obj, m, fields...)
-}
-
-// SetNestedSlice sets the slice value of a nested field.
-// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
-func SetNestedSlice(obj map[string]interface{}, value []interface{}, fields ...string) error {
- return SetNestedField(obj, value, fields...)
-}
-
-// SetNestedStringMap sets the map[string]string value of a nested field.
-// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
-func SetNestedStringMap(obj map[string]interface{}, value map[string]string, fields ...string) error {
- m := make(map[string]interface{}, len(value)) // convert map[string]string into map[string]interface{}
- for k, v := range value {
- m[k] = v
- }
- return setNestedFieldNoCopy(obj, m, fields...)
-}
-
-// SetNestedMap sets the map[string]interface{} value of a nested field.
-// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
-func SetNestedMap(obj map[string]interface{}, value map[string]interface{}, fields ...string) error {
- return SetNestedField(obj, value, fields...)
-}
-
-// RemoveNestedField removes the nested field from the obj.
-func RemoveNestedField(obj map[string]interface{}, fields ...string) {
- m := obj
- for _, field := range fields[:len(fields)-1] {
- if x, ok := m[field].(map[string]interface{}); ok {
- m = x
- } else {
- return
- }
- }
- delete(m, fields[len(fields)-1])
-}
-
-func getNestedString(obj map[string]interface{}, fields ...string) string {
- val, found, err := NestedString(obj, fields...)
- if !found || err != nil {
- return ""
- }
- return val
-}
-
-func getNestedInt64Pointer(obj map[string]interface{}, fields ...string) *int64 {
- val, found, err := NestedInt64(obj, fields...)
- if !found || err != nil {
- return nil
- }
- return &val
-}
-
-func jsonPath(fields []string) string {
- return "." + strings.Join(fields, ".")
-}
-
-func extractOwnerReference(v map[string]interface{}) metav1.OwnerReference {
- // though this field is a *bool, when decoded from JSON it's
- // unmarshalled as a bool.
- var controllerPtr *bool
- if controller, found, err := NestedBool(v, "controller"); err == nil && found {
- controllerPtr = &controller
- }
- var blockOwnerDeletionPtr *bool
- if blockOwnerDeletion, found, err := NestedBool(v, "blockOwnerDeletion"); err == nil && found {
- blockOwnerDeletionPtr = &blockOwnerDeletion
- }
- return metav1.OwnerReference{
- Kind: getNestedString(v, "kind"),
- Name: getNestedString(v, "name"),
- APIVersion: getNestedString(v, "apiVersion"),
- UID: types.UID(getNestedString(v, "uid")),
- Controller: controllerPtr,
- BlockOwnerDeletion: blockOwnerDeletionPtr,
- }
-}
-
-// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured
-// type, which can be used for generic access to objects without a predefined scheme.
-// TODO: move into serializer/json.
-var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{}
-
-type unstructuredJSONScheme struct{}
-
-const unstructuredJSONSchemeIdentifier runtime.Identifier = "unstructuredJSON"
-
-func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
- var err error
- if obj != nil {
- err = s.decodeInto(data, obj)
- } else {
- obj, err = s.decode(data)
- }
-
- if err != nil {
- return nil, nil, err
- }
-
- gvk := obj.GetObjectKind().GroupVersionKind()
- if len(gvk.Kind) == 0 {
- return nil, &gvk, runtime.NewMissingKindErr(string(data))
- }
-
- return obj, &gvk, nil
-}
-
-func (s unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error {
- if co, ok := obj.(runtime.CacheableObject); ok {
- return co.CacheEncode(s.Identifier(), s.doEncode, w)
- }
- return s.doEncode(obj, w)
-}
-
-func (unstructuredJSONScheme) doEncode(obj runtime.Object, w io.Writer) error {
- switch t := obj.(type) {
- case *Unstructured:
- return json.NewEncoder(w).Encode(t.Object)
- case *UnstructuredList:
- items := make([]interface{}, 0, len(t.Items))
- for _, i := range t.Items {
- items = append(items, i.Object)
- }
- listObj := make(map[string]interface{}, len(t.Object)+1)
- for k, v := range t.Object { // Make a shallow copy
- listObj[k] = v
- }
- listObj["items"] = items
- return json.NewEncoder(w).Encode(listObj)
- case *runtime.Unknown:
- // TODO: Unstructured needs to deal with ContentType.
- _, err := w.Write(t.Raw)
- return err
- default:
- return json.NewEncoder(w).Encode(t)
- }
-}
-
-// Identifier implements runtime.Encoder interface.
-func (unstructuredJSONScheme) Identifier() runtime.Identifier {
- return unstructuredJSONSchemeIdentifier
-}
-
-func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) {
- type detector struct {
- Items gojson.RawMessage
- }
- var det detector
- if err := json.Unmarshal(data, &det); err != nil {
- return nil, err
- }
-
- if det.Items != nil {
- list := &UnstructuredList{}
- err := s.decodeToList(data, list)
- return list, err
- }
-
- // No Items field, so it wasn't a list.
- unstruct := &Unstructured{}
- err := s.decodeToUnstructured(data, unstruct)
- return unstruct, err
-}
-
-func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) error {
- switch x := obj.(type) {
- case *Unstructured:
- return s.decodeToUnstructured(data, x)
- case *UnstructuredList:
- return s.decodeToList(data, x)
- default:
- return json.Unmarshal(data, x)
- }
-}
-
-func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error {
- m := make(map[string]interface{})
- if err := json.Unmarshal(data, &m); err != nil {
- return err
- }
-
- unstruct.Object = m
-
- return nil
-}
-
-func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error {
- type decodeList struct {
- Items []gojson.RawMessage
- }
-
- var dList decodeList
- if err := json.Unmarshal(data, &dList); err != nil {
- return err
- }
-
- if err := json.Unmarshal(data, &list.Object); err != nil {
- return err
- }
-
- // For typed lists, e.g., a PodList, the API server doesn't set each item's
- // APIVersion and Kind. We need to set them.
- listAPIVersion := list.GetAPIVersion()
- listKind := list.GetKind()
- itemKind := strings.TrimSuffix(listKind, "List")
-
- delete(list.Object, "items")
- list.Items = make([]Unstructured, 0, len(dList.Items))
- for _, i := range dList.Items {
- unstruct := &Unstructured{}
- if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil {
- return err
- }
- // This is hacky. Set the item's Kind and APIVersion to those inferred
- // from the List.
- if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 {
- unstruct.SetKind(itemKind)
- unstruct.SetAPIVersion(listAPIVersion)
- }
- list.Items = append(list.Items, *unstruct)
- }
- return nil
-}
-
-type jsonFallbackEncoder struct {
- encoder runtime.Encoder
- identifier runtime.Identifier
-}
-
-func NewJSONFallbackEncoder(encoder runtime.Encoder) runtime.Encoder {
- result := map[string]string{
- "name": "fallback",
- "base": string(encoder.Identifier()),
- }
- identifier, err := gojson.Marshal(result)
- if err != nil {
- klog.Fatalf("Failed marshaling identifier for jsonFallbackEncoder: %v", err)
- }
- return &jsonFallbackEncoder{
- encoder: encoder,
- identifier: runtime.Identifier(identifier),
- }
-}
-
-func (c *jsonFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error {
- // There is no need to handle runtime.CacheableObject, as we only
- // fallback to other encoders here.
- err := c.encoder.Encode(obj, w)
- if runtime.IsNotRegisteredError(err) {
- switch obj.(type) {
- case *Unstructured, *UnstructuredList:
- return UnstructuredJSONScheme.Encode(obj, w)
- }
- }
- return err
-}
-
-// Identifier implements runtime.Encoder interface.
-func (c *jsonFallbackEncoder) Identifier() runtime.Identifier {
- return c.identifier
-}
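A brief illustrative sketch (not part of the removed file): the nested-field helpers above operate on plain map[string]interface{} trees such as those produced by JSON decoding; the import path mirrors the vendored package being removed.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// Illustrative sketch only; not part of the vendored code removed in this diff.
	obj := map[string]interface{}{
		"metadata": map[string]interface{}{"name": "demo"},
	}

	// NestedString reports whether the path exists and whether the value is a string.
	name, found, err := unstructured.NestedString(obj, "metadata", "name")
	fmt.Println(name, found, err) // demo true <nil>

	// SetNestedField deep-copies the value and creates intermediate maps as needed.
	if err := unstructured.SetNestedField(obj, "default", "metadata", "namespace"); err != nil {
		panic(err)
	}

	// RemoveNestedField is a no-op when an intermediate level is missing.
	unstructured.RemoveNestedField(obj, "metadata", "name")
}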
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
deleted file mode 100644
index d1903394d..000000000
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
+++ /dev/null
@@ -1,496 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package unstructured
-
-import (
- "bytes"
- "errors"
- "fmt"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/types"
- utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-)
-
-// Unstructured allows objects that do not have Golang structs registered to be manipulated
-// generically. This can be used to deal with the API objects from a plug-in. Unstructured
-// objects still have functioning TypeMeta features-- kind, version, etc.
-//
-// WARNING: This object has accessors for the v1 standard metadata. You *MUST NOT* use this
-// type if you are dealing with objects that are not in the server meta v1 schema.
-//
-// TODO: make the serialization part of this type distinct from the field accessors.
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:deepcopy-gen=true
-type Unstructured struct {
- // Object is a JSON compatible map with string, float, int, bool, []interface{}, or
- // map[string]interface{}
- // children.
- Object map[string]interface{}
-}
-
-var _ metav1.Object = &Unstructured{}
-var _ runtime.Unstructured = &Unstructured{}
-var _ metav1.ListInterface = &Unstructured{}
-
-func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj }
-
-func (obj *Unstructured) IsList() bool {
- field, ok := obj.Object["items"]
- if !ok {
- return false
- }
- _, ok = field.([]interface{})
- return ok
-}
-func (obj *Unstructured) ToList() (*UnstructuredList, error) {
- if !obj.IsList() {
- // return an empty list back
- return &UnstructuredList{Object: obj.Object}, nil
- }
-
- ret := &UnstructuredList{}
- ret.Object = obj.Object
-
- err := obj.EachListItem(func(item runtime.Object) error {
- castItem := item.(*Unstructured)
- ret.Items = append(ret.Items, *castItem)
- return nil
- })
- if err != nil {
- return nil, err
- }
-
- return ret, nil
-}
-
-func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error {
- field, ok := obj.Object["items"]
- if !ok {
- return errors.New("content is not a list")
- }
- items, ok := field.([]interface{})
- if !ok {
- return fmt.Errorf("content is not a list: %T", field)
- }
- for _, item := range items {
- child, ok := item.(map[string]interface{})
- if !ok {
- return fmt.Errorf("items member is not an object: %T", child)
- }
- if err := fn(&Unstructured{Object: child}); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (obj *Unstructured) UnstructuredContent() map[string]interface{} {
- if obj.Object == nil {
- return make(map[string]interface{})
- }
- return obj.Object
-}
-
-func (obj *Unstructured) SetUnstructuredContent(content map[string]interface{}) {
- obj.Object = content
-}
-
-// MarshalJSON ensures that the unstructured object produces proper
-// JSON when passed to Go's standard JSON library.
-func (u *Unstructured) MarshalJSON() ([]byte, error) {
- var buf bytes.Buffer
- err := UnstructuredJSONScheme.Encode(u, &buf)
- return buf.Bytes(), err
-}
-
-// UnmarshalJSON ensures that the unstructured object properly decodes
-// JSON when passed to Go's standard JSON library.
-func (u *Unstructured) UnmarshalJSON(b []byte) error {
- _, _, err := UnstructuredJSONScheme.Decode(b, nil, u)
- return err
-}
-
-// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data.
-// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info.
-func (in *Unstructured) NewEmptyInstance() runtime.Unstructured {
- out := new(Unstructured)
- if in != nil {
- out.GetObjectKind().SetGroupVersionKind(in.GetObjectKind().GroupVersionKind())
- }
- return out
-}
-
-func (in *Unstructured) DeepCopy() *Unstructured {
- if in == nil {
- return nil
- }
- out := new(Unstructured)
- *out = *in
- out.Object = runtime.DeepCopyJSON(in.Object)
- return out
-}
-
-func (u *Unstructured) setNestedField(value interface{}, fields ...string) {
- if u.Object == nil {
- u.Object = make(map[string]interface{})
- }
- SetNestedField(u.Object, value, fields...)
-}
-
-func (u *Unstructured) setNestedStringSlice(value []string, fields ...string) {
- if u.Object == nil {
- u.Object = make(map[string]interface{})
- }
- SetNestedStringSlice(u.Object, value, fields...)
-}
-
-func (u *Unstructured) setNestedSlice(value []interface{}, fields ...string) {
- if u.Object == nil {
- u.Object = make(map[string]interface{})
- }
- SetNestedSlice(u.Object, value, fields...)
-}
-
-func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) {
- if u.Object == nil {
- u.Object = make(map[string]interface{})
- }
- SetNestedStringMap(u.Object, value, fields...)
-}
-
-func (u *Unstructured) GetOwnerReferences() []metav1.OwnerReference {
- field, found, err := NestedFieldNoCopy(u.Object, "metadata", "ownerReferences")
- if !found || err != nil {
- return nil
- }
- original, ok := field.([]interface{})
- if !ok {
- return nil
- }
- ret := make([]metav1.OwnerReference, 0, len(original))
- for _, obj := range original {
- o, ok := obj.(map[string]interface{})
- if !ok {
- // expected map[string]interface{}, got something else
- return nil
- }
- ret = append(ret, extractOwnerReference(o))
- }
- return ret
-}
-
-func (u *Unstructured) SetOwnerReferences(references []metav1.OwnerReference) {
- if references == nil {
- RemoveNestedField(u.Object, "metadata", "ownerReferences")
- return
- }
-
- newReferences := make([]interface{}, 0, len(references))
- for _, reference := range references {
- out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&reference)
- if err != nil {
- utilruntime.HandleError(fmt.Errorf("unable to convert Owner Reference: %v", err))
- continue
- }
- newReferences = append(newReferences, out)
- }
- u.setNestedField(newReferences, "metadata", "ownerReferences")
-}
-
-func (u *Unstructured) GetAPIVersion() string {
- return getNestedString(u.Object, "apiVersion")
-}
-
-func (u *Unstructured) SetAPIVersion(version string) {
- u.setNestedField(version, "apiVersion")
-}
-
-func (u *Unstructured) GetKind() string {
- return getNestedString(u.Object, "kind")
-}
-
-func (u *Unstructured) SetKind(kind string) {
- u.setNestedField(kind, "kind")
-}
-
-func (u *Unstructured) GetNamespace() string {
- return getNestedString(u.Object, "metadata", "namespace")
-}
-
-func (u *Unstructured) SetNamespace(namespace string) {
- if len(namespace) == 0 {
- RemoveNestedField(u.Object, "metadata", "namespace")
- return
- }
- u.setNestedField(namespace, "metadata", "namespace")
-}
-
-func (u *Unstructured) GetName() string {
- return getNestedString(u.Object, "metadata", "name")
-}
-
-func (u *Unstructured) SetName(name string) {
- if len(name) == 0 {
- RemoveNestedField(u.Object, "metadata", "name")
- return
- }
- u.setNestedField(name, "metadata", "name")
-}
-
-func (u *Unstructured) GetGenerateName() string {
- return getNestedString(u.Object, "metadata", "generateName")
-}
-
-func (u *Unstructured) SetGenerateName(generateName string) {
- if len(generateName) == 0 {
- RemoveNestedField(u.Object, "metadata", "generateName")
- return
- }
- u.setNestedField(generateName, "metadata", "generateName")
-}
-
-func (u *Unstructured) GetUID() types.UID {
- return types.UID(getNestedString(u.Object, "metadata", "uid"))
-}
-
-func (u *Unstructured) SetUID(uid types.UID) {
- if len(string(uid)) == 0 {
- RemoveNestedField(u.Object, "metadata", "uid")
- return
- }
- u.setNestedField(string(uid), "metadata", "uid")
-}
-
-func (u *Unstructured) GetResourceVersion() string {
- return getNestedString(u.Object, "metadata", "resourceVersion")
-}
-
-func (u *Unstructured) SetResourceVersion(resourceVersion string) {
- if len(resourceVersion) == 0 {
- RemoveNestedField(u.Object, "metadata", "resourceVersion")
- return
- }
- u.setNestedField(resourceVersion, "metadata", "resourceVersion")
-}
-
-func (u *Unstructured) GetGeneration() int64 {
- val, found, err := NestedInt64(u.Object, "metadata", "generation")
- if !found || err != nil {
- return 0
- }
- return val
-}
-
-func (u *Unstructured) SetGeneration(generation int64) {
- if generation == 0 {
- RemoveNestedField(u.Object, "metadata", "generation")
- return
- }
- u.setNestedField(generation, "metadata", "generation")
-}
-
-func (u *Unstructured) GetSelfLink() string {
- return getNestedString(u.Object, "metadata", "selfLink")
-}
-
-func (u *Unstructured) SetSelfLink(selfLink string) {
- if len(selfLink) == 0 {
- RemoveNestedField(u.Object, "metadata", "selfLink")
- return
- }
- u.setNestedField(selfLink, "metadata", "selfLink")
-}
-
-func (u *Unstructured) GetContinue() string {
- return getNestedString(u.Object, "metadata", "continue")
-}
-
-func (u *Unstructured) SetContinue(c string) {
- if len(c) == 0 {
- RemoveNestedField(u.Object, "metadata", "continue")
- return
- }
- u.setNestedField(c, "metadata", "continue")
-}
-
-func (u *Unstructured) GetRemainingItemCount() *int64 {
- return getNestedInt64Pointer(u.Object, "metadata", "remainingItemCount")
-}
-
-func (u *Unstructured) SetRemainingItemCount(c *int64) {
- if c == nil {
- RemoveNestedField(u.Object, "metadata", "remainingItemCount")
- } else {
- u.setNestedField(*c, "metadata", "remainingItemCount")
- }
-}
-
-func (u *Unstructured) GetCreationTimestamp() metav1.Time {
- var timestamp metav1.Time
- timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp"))
- return timestamp
-}
-
-func (u *Unstructured) SetCreationTimestamp(timestamp metav1.Time) {
- ts, _ := timestamp.MarshalQueryParameter()
- if len(ts) == 0 || timestamp.Time.IsZero() {
- RemoveNestedField(u.Object, "metadata", "creationTimestamp")
- return
- }
- u.setNestedField(ts, "metadata", "creationTimestamp")
-}
-
-func (u *Unstructured) GetDeletionTimestamp() *metav1.Time {
- var timestamp metav1.Time
- timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "deletionTimestamp"))
- if timestamp.IsZero() {
- return nil
- }
- return &timestamp
-}
-
-func (u *Unstructured) SetDeletionTimestamp(timestamp *metav1.Time) {
- if timestamp == nil {
- RemoveNestedField(u.Object, "metadata", "deletionTimestamp")
- return
- }
- ts, _ := timestamp.MarshalQueryParameter()
- u.setNestedField(ts, "metadata", "deletionTimestamp")
-}
-
-func (u *Unstructured) GetDeletionGracePeriodSeconds() *int64 {
- val, found, err := NestedInt64(u.Object, "metadata", "deletionGracePeriodSeconds")
- if !found || err != nil {
- return nil
- }
- return &val
-}
-
-func (u *Unstructured) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) {
- if deletionGracePeriodSeconds == nil {
- RemoveNestedField(u.Object, "metadata", "deletionGracePeriodSeconds")
- return
- }
- u.setNestedField(*deletionGracePeriodSeconds, "metadata", "deletionGracePeriodSeconds")
-}
-
-func (u *Unstructured) GetLabels() map[string]string {
- m, _, _ := NestedStringMap(u.Object, "metadata", "labels")
- return m
-}
-
-func (u *Unstructured) SetLabels(labels map[string]string) {
- if labels == nil {
- RemoveNestedField(u.Object, "metadata", "labels")
- return
- }
- u.setNestedMap(labels, "metadata", "labels")
-}
-
-func (u *Unstructured) GetAnnotations() map[string]string {
- m, _, _ := NestedStringMap(u.Object, "metadata", "annotations")
- return m
-}
-
-func (u *Unstructured) SetAnnotations(annotations map[string]string) {
- if annotations == nil {
- RemoveNestedField(u.Object, "metadata", "annotations")
- return
- }
- u.setNestedMap(annotations, "metadata", "annotations")
-}
-
-func (u *Unstructured) SetGroupVersionKind(gvk schema.GroupVersionKind) {
- u.SetAPIVersion(gvk.GroupVersion().String())
- u.SetKind(gvk.Kind)
-}
-
-func (u *Unstructured) GroupVersionKind() schema.GroupVersionKind {
- gv, err := schema.ParseGroupVersion(u.GetAPIVersion())
- if err != nil {
- return schema.GroupVersionKind{}
- }
- gvk := gv.WithKind(u.GetKind())
- return gvk
-}
-
-func (u *Unstructured) GetFinalizers() []string {
- val, _, _ := NestedStringSlice(u.Object, "metadata", "finalizers")
- return val
-}
-
-func (u *Unstructured) SetFinalizers(finalizers []string) {
- if finalizers == nil {
- RemoveNestedField(u.Object, "metadata", "finalizers")
- return
- }
- u.setNestedStringSlice(finalizers, "metadata", "finalizers")
-}
-
-func (u *Unstructured) GetClusterName() string {
- return getNestedString(u.Object, "metadata", "clusterName")
-}
-
-func (u *Unstructured) SetClusterName(clusterName string) {
- if len(clusterName) == 0 {
- RemoveNestedField(u.Object, "metadata", "clusterName")
- return
- }
- u.setNestedField(clusterName, "metadata", "clusterName")
-}
-
-func (u *Unstructured) GetManagedFields() []metav1.ManagedFieldsEntry {
- items, found, err := NestedSlice(u.Object, "metadata", "managedFields")
- if !found || err != nil {
- return nil
- }
- managedFields := []metav1.ManagedFieldsEntry{}
- for _, item := range items {
- m, ok := item.(map[string]interface{})
- if !ok {
- utilruntime.HandleError(fmt.Errorf("unable to retrieve managedFields for object, item %v is not a map", item))
- return nil
- }
- out := metav1.ManagedFieldsEntry{}
- if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, &out); err != nil {
- utilruntime.HandleError(fmt.Errorf("unable to retrieve managedFields for object: %v", err))
- return nil
- }
- managedFields = append(managedFields, out)
- }
- return managedFields
-}
-
-func (u *Unstructured) SetManagedFields(managedFields []metav1.ManagedFieldsEntry) {
- if managedFields == nil {
- RemoveNestedField(u.Object, "metadata", "managedFields")
- return
- }
- items := []interface{}{}
- for _, managedFieldsEntry := range managedFields {
- out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&managedFieldsEntry)
- if err != nil {
- utilruntime.HandleError(fmt.Errorf("unable to set managedFields for object: %v", err))
- return
- }
- items = append(items, out)
- }
- u.setNestedSlice(items, "metadata", "managedFields")
-}
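Another short illustrative sketch (again, not part of the removed file): building an Unstructured object through the typed accessors above instead of a registered Go struct.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Illustrative sketch only; not part of the vendored code removed in this diff.
	u := &unstructured.Unstructured{}

	// The typed accessors write into the underlying Object map, creating it on first use.
	u.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})
	u.SetName("demo")
	u.SetNamespace("default")
	u.SetLabels(map[string]string{"app": "demo"})

	// MarshalJSON delegates to UnstructuredJSONScheme.Encode.
	data, err := u.MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}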
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go
deleted file mode 100644
index 5028f5fb5..000000000
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package unstructured
-
-import (
- "bytes"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-var _ runtime.Unstructured = &UnstructuredList{}
-var _ metav1.ListInterface = &UnstructuredList{}
-
-// UnstructuredList allows lists that do not have Golang structs
-// registered to be manipulated generically. This can be used to deal
-// with the API lists from a plug-in.
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:deepcopy-gen=true
-type UnstructuredList struct {
- Object map[string]interface{}
-
- // Items is a list of unstructured objects.
- Items []Unstructured `json:"items"`
-}
-
-func (u *UnstructuredList) GetObjectKind() schema.ObjectKind { return u }
-
-func (u *UnstructuredList) IsList() bool { return true }
-
-func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error {
- for i := range u.Items {
- if err := fn(&u.Items[i]); err != nil {
- return err
- }
- }
- return nil
-}
-
-// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data.
-// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info.
-func (u *UnstructuredList) NewEmptyInstance() runtime.Unstructured {
- out := new(UnstructuredList)
- if u != nil {
- out.SetGroupVersionKind(u.GroupVersionKind())
- }
- return out
-}
-
-// UnstructuredContent returns a map containing an overlay of the Items field onto
-// the Object field. Items always overwrites the overlay.
-func (u *UnstructuredList) UnstructuredContent() map[string]interface{} {
- out := make(map[string]interface{}, len(u.Object)+1)
-
- // shallow copy every property
- for k, v := range u.Object {
- out[k] = v
- }
-
- items := make([]interface{}, len(u.Items))
- for i, item := range u.Items {
- items[i] = item.UnstructuredContent()
- }
- out["items"] = items
- return out
-}
-
-// SetUnstructuredContent obeys the conventions of List and keeps Items and the items
-// array in sync. If items is not an array of objects in the incoming map, then any
-// mismatched item will be removed.
-func (obj *UnstructuredList) SetUnstructuredContent(content map[string]interface{}) {
- obj.Object = content
- if content == nil {
- obj.Items = nil
- return
- }
- items, ok := obj.Object["items"].([]interface{})
- if !ok || items == nil {
- items = []interface{}{}
- }
- unstructuredItems := make([]Unstructured, 0, len(items))
- newItems := make([]interface{}, 0, len(items))
- for _, item := range items {
- o, ok := item.(map[string]interface{})
- if !ok {
- continue
- }
- unstructuredItems = append(unstructuredItems, Unstructured{Object: o})
- newItems = append(newItems, o)
- }
- obj.Items = unstructuredItems
- obj.Object["items"] = newItems
-}
-
-func (u *UnstructuredList) DeepCopy() *UnstructuredList {
- if u == nil {
- return nil
- }
- out := new(UnstructuredList)
- *out = *u
- out.Object = runtime.DeepCopyJSON(u.Object)
- out.Items = make([]Unstructured, len(u.Items))
- for i := range u.Items {
- u.Items[i].DeepCopyInto(&out.Items[i])
- }
- return out
-}
-
-// MarshalJSON ensures that the unstructured list object produces proper
-// JSON when passed to Go's standard JSON library.
-func (u *UnstructuredList) MarshalJSON() ([]byte, error) {
- var buf bytes.Buffer
- err := UnstructuredJSONScheme.Encode(u, &buf)
- return buf.Bytes(), err
-}
-
-// UnmarshalJSON ensures that the unstructured list object properly
-// decodes JSON when passed to Go's standard JSON library.
-func (u *UnstructuredList) UnmarshalJSON(b []byte) error {
- _, _, err := UnstructuredJSONScheme.Decode(b, nil, u)
- return err
-}
-
-func (u *UnstructuredList) GetAPIVersion() string {
- return getNestedString(u.Object, "apiVersion")
-}
-
-func (u *UnstructuredList) SetAPIVersion(version string) {
- u.setNestedField(version, "apiVersion")
-}
-
-func (u *UnstructuredList) GetKind() string {
- return getNestedString(u.Object, "kind")
-}
-
-func (u *UnstructuredList) SetKind(kind string) {
- u.setNestedField(kind, "kind")
-}
-
-func (u *UnstructuredList) GetResourceVersion() string {
- return getNestedString(u.Object, "metadata", "resourceVersion")
-}
-
-func (u *UnstructuredList) SetResourceVersion(version string) {
- u.setNestedField(version, "metadata", "resourceVersion")
-}
-
-func (u *UnstructuredList) GetSelfLink() string {
- return getNestedString(u.Object, "metadata", "selfLink")
-}
-
-func (u *UnstructuredList) SetSelfLink(selfLink string) {
- u.setNestedField(selfLink, "metadata", "selfLink")
-}
-
-func (u *UnstructuredList) GetContinue() string {
- return getNestedString(u.Object, "metadata", "continue")
-}
-
-func (u *UnstructuredList) SetContinue(c string) {
- u.setNestedField(c, "metadata", "continue")
-}
-
-func (u *UnstructuredList) GetRemainingItemCount() *int64 {
- return getNestedInt64Pointer(u.Object, "metadata", "remainingItemCount")
-}
-
-func (u *UnstructuredList) SetRemainingItemCount(c *int64) {
- if c == nil {
- RemoveNestedField(u.Object, "metadata", "remainingItemCount")
- } else {
- u.setNestedField(*c, "metadata", "remainingItemCount")
- }
-}
-
-func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) {
- u.SetAPIVersion(gvk.GroupVersion().String())
- u.SetKind(gvk.Kind)
-}
-
-func (u *UnstructuredList) GroupVersionKind() schema.GroupVersionKind {
- gv, err := schema.ParseGroupVersion(u.GetAPIVersion())
- if err != nil {
- return schema.GroupVersionKind{}
- }
- gvk := gv.WithKind(u.GetKind())
- return gvk
-}
-
-func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) {
- if u.Object == nil {
- u.Object = make(map[string]interface{})
- }
- SetNestedField(u.Object, value, fields...)
-}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go
deleted file mode 100644
index 9a9f25e8f..000000000
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package unstructured
-
-import (
- runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Unstructured) DeepCopyInto(out *Unstructured) {
- clone := in.DeepCopy()
- *out = *clone
- return
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Unstructured) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *UnstructuredList) DeepCopyInto(out *UnstructuredList) {
- clone := in.DeepCopy()
- *out = *clone
- return
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *UnstructuredList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
deleted file mode 100644
index e55ab94d1..000000000
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package serializer
-
-import (
- "mime"
- "strings"
-
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/runtime/serializer/json"
- "k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
- "k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
- "k8s.io/apimachinery/pkg/runtime/serializer/versioning"
-)
-
-// serializerExtensions are for serializers that are conditionally compiled in
-var serializerExtensions = []func(*runtime.Scheme) (serializerType, bool){}
-
-type serializerType struct {
- AcceptContentTypes []string
- ContentType string
- FileExtensions []string
- // EncodesAsText should be true if this content type can be represented safely in UTF-8
- EncodesAsText bool
-
- Serializer runtime.Serializer
- PrettySerializer runtime.Serializer
-
- AcceptStreamContentTypes []string
- StreamContentType string
-
- Framer runtime.Framer
- StreamSerializer runtime.Serializer
-}
-
-func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []serializerType {
- jsonSerializer := json.NewSerializerWithOptions(
- mf, scheme, scheme,
- json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict},
- )
- jsonSerializerType := serializerType{
- AcceptContentTypes: []string{runtime.ContentTypeJSON},
- ContentType: runtime.ContentTypeJSON,
- FileExtensions: []string{"json"},
- EncodesAsText: true,
- Serializer: jsonSerializer,
-
- Framer: json.Framer,
- StreamSerializer: jsonSerializer,
- }
- if options.Pretty {
- jsonSerializerType.PrettySerializer = json.NewSerializerWithOptions(
- mf, scheme, scheme,
- json.SerializerOptions{Yaml: false, Pretty: true, Strict: options.Strict},
- )
- }
-
- yamlSerializer := json.NewSerializerWithOptions(
- mf, scheme, scheme,
- json.SerializerOptions{Yaml: true, Pretty: false, Strict: options.Strict},
- )
- protoSerializer := protobuf.NewSerializer(scheme, scheme)
- protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme)
-
- serializers := []serializerType{
- jsonSerializerType,
- {
- AcceptContentTypes: []string{runtime.ContentTypeYAML},
- ContentType: runtime.ContentTypeYAML,
- FileExtensions: []string{"yaml"},
- EncodesAsText: true,
- Serializer: yamlSerializer,
- },
- {
- AcceptContentTypes: []string{runtime.ContentTypeProtobuf},
- ContentType: runtime.ContentTypeProtobuf,
- FileExtensions: []string{"pb"},
- Serializer: protoSerializer,
-
- Framer: protobuf.LengthDelimitedFramer,
- StreamSerializer: protoRawSerializer,
- },
- }
-
- for _, fn := range serializerExtensions {
- if serializer, ok := fn(scheme); ok {
- serializers = append(serializers, serializer)
- }
- }
- return serializers
-}
-
-// CodecFactory provides methods for retrieving codecs and serializers for specific
-// versions and content types.
-type CodecFactory struct {
- scheme *runtime.Scheme
- universal runtime.Decoder
- accepts []runtime.SerializerInfo
-
- legacySerializer runtime.Serializer
-}
-
-// CodecFactoryOptions holds the options for configuring CodecFactory behavior
-type CodecFactoryOptions struct {
- // Strict configures all serializers in strict mode
- Strict bool
- // Pretty includes a pretty serializer along with the non-pretty one
- Pretty bool
-}
-
-// CodecFactoryOptionsMutator takes a pointer to an options struct and then modifies it.
-// Functions implementing this type can be passed to the NewCodecFactory() constructor.
-type CodecFactoryOptionsMutator func(*CodecFactoryOptions)
-
-// EnablePretty enables including a pretty serializer along with the non-pretty one
-func EnablePretty(options *CodecFactoryOptions) {
- options.Pretty = true
-}
-
-// DisablePretty disables including a pretty serializer along with the non-pretty one
-func DisablePretty(options *CodecFactoryOptions) {
- options.Pretty = false
-}
-
-// EnableStrict enables configuring all serializers in strict mode
-func EnableStrict(options *CodecFactoryOptions) {
- options.Strict = true
-}
-
-// DisableStrict disables configuring all serializers in strict mode
-func DisableStrict(options *CodecFactoryOptions) {
- options.Strict = false
-}
-
-// NewCodecFactory provides methods for retrieving serializers for the supported wire formats
-// and conversion wrappers to define preferred internal and external versions. In the future,
-// as the internal version is used less, callers may instead use a defaulting serializer and
-// only convert objects which are shared internally (Status, common API machinery).
-//
-// Mutators can be passed to change the CodecFactoryOptions before construction of the factory.
-// It is recommended to explicitly pass mutators instead of relying on defaults.
-// By default, Pretty is enabled -- this is conformant with previously supported behavior.
-//
-// TODO: allow other codecs to be compiled in?
-// TODO: accept a scheme interface
-func NewCodecFactory(scheme *runtime.Scheme, mutators ...CodecFactoryOptionsMutator) CodecFactory {
- options := CodecFactoryOptions{Pretty: true}
- for _, fn := range mutators {
- fn(&options)
- }
-
- serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory, options)
- return newCodecFactory(scheme, serializers)
-}
-
-// newCodecFactory is a helper for testing that allows a different metafactory to be specified.
-func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) CodecFactory {
- decoders := make([]runtime.Decoder, 0, len(serializers))
- var accepts []runtime.SerializerInfo
- alreadyAccepted := make(map[string]struct{})
-
- var legacySerializer runtime.Serializer
- for _, d := range serializers {
- decoders = append(decoders, d.Serializer)
- for _, mediaType := range d.AcceptContentTypes {
- if _, ok := alreadyAccepted[mediaType]; ok {
- continue
- }
- alreadyAccepted[mediaType] = struct{}{}
- info := runtime.SerializerInfo{
- MediaType: d.ContentType,
- EncodesAsText: d.EncodesAsText,
- Serializer: d.Serializer,
- PrettySerializer: d.PrettySerializer,
- }
-
- mediaType, _, err := mime.ParseMediaType(info.MediaType)
- if err != nil {
- panic(err)
- }
- parts := strings.SplitN(mediaType, "/", 2)
- info.MediaTypeType = parts[0]
- info.MediaTypeSubType = parts[1]
-
- if d.StreamSerializer != nil {
- info.StreamSerializer = &runtime.StreamSerializerInfo{
- Serializer: d.StreamSerializer,
- EncodesAsText: d.EncodesAsText,
- Framer: d.Framer,
- }
- }
- accepts = append(accepts, info)
- if mediaType == runtime.ContentTypeJSON {
- legacySerializer = d.Serializer
- }
- }
- }
- if legacySerializer == nil {
- legacySerializer = serializers[0].Serializer
- }
-
- return CodecFactory{
- scheme: scheme,
- universal: recognizer.NewDecoder(decoders...),
-
- accepts: accepts,
-
- legacySerializer: legacySerializer,
- }
-}
-
-// WithoutConversion returns a NegotiatedSerializer that performs no conversion, even if the
-// caller requests it.
-func (f CodecFactory) WithoutConversion() runtime.NegotiatedSerializer {
- return WithoutConversionCodecFactory{f}
-}
-
-// SupportedMediaTypes returns the RFC2046 media types that this factory has serializers for.
-func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo {
- return f.accepts
-}
-
-// LegacyCodec encodes output to the given API versions, and decodes output into the internal form from
-// any recognized source. The returned codec will always encode output to JSON. If a type is not
-// found in the list of versions an error will be returned.
-//
-// This method is deprecated - clients and servers should negotiate a serializer by mime-type and
-// invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder().
-//
-// TODO: make this call exist only in pkg/api, and initialize it with the set of default versions.
-// All other callers will be forced to request a Codec directly.
-func (f CodecFactory) LegacyCodec(version ...schema.GroupVersion) runtime.Codec {
- return versioning.NewDefaultingCodecForScheme(f.scheme, f.legacySerializer, f.universal, schema.GroupVersions(version), runtime.InternalGroupVersioner)
-}
-
-// UniversalDeserializer can convert any stored data recognized by this factory into a Go object that satisfies
-// runtime.Object. It does not perform conversion. It does not perform defaulting.
-func (f CodecFactory) UniversalDeserializer() runtime.Decoder {
- return f.universal
-}
-
-// UniversalDecoder returns a runtime.Decoder capable of decoding all known API objects in all known formats. Used
-// by clients that do not need to encode objects but want to deserialize API objects stored on disk. Only decodes
-// objects in groups registered with the scheme. The GroupVersions passed may be used to select alternate
-// versions of objects to return - by default, runtime.APIVersionInternal is used. If any versions are specified,
-// unrecognized groups will be returned in the version they are encoded as (no conversion). This decoder performs
-// defaulting.
-//
-// TODO: the decoder will eventually be removed in favor of dealing with objects in their versioned form
-// TODO: only accept a group versioner
-func (f CodecFactory) UniversalDecoder(versions ...schema.GroupVersion) runtime.Decoder {
- var versioner runtime.GroupVersioner
- if len(versions) == 0 {
- versioner = runtime.InternalGroupVersioner
- } else {
- versioner = schema.GroupVersions(versions)
- }
- return f.CodecForVersions(nil, f.universal, nil, versioner)
-}
-
-// CodecForVersions creates a codec with the provided serializer. If an object is decoded and its group is not in the list,
-// it will default to runtime.APIVersionInternal. If encode is not specified for an object's group, the object is not
-// converted. If encode or decode are nil, no conversion is performed.
-func (f CodecFactory) CodecForVersions(encoder runtime.Encoder, decoder runtime.Decoder, encode runtime.GroupVersioner, decode runtime.GroupVersioner) runtime.Codec {
- // TODO: these are for backcompat, remove them in the future
- if encode == nil {
- encode = runtime.DisabledGroupVersioner
- }
- if decode == nil {
- decode = runtime.InternalGroupVersioner
- }
- return versioning.NewDefaultingCodecForScheme(f.scheme, encoder, decoder, encode, decode)
-}
-
-// DecoderToVersion returns a decoder that targets the provided group version.
-func (f CodecFactory) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder {
- return f.CodecForVersions(nil, decoder, nil, gv)
-}
-
-// EncoderForVersion returns an encoder that targets the provided group version.
-func (f CodecFactory) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder {
- return f.CodecForVersions(encoder, nil, gv, nil)
-}
-
-// WithoutConversionCodecFactory is a CodecFactory that will explicitly ignore requests to perform conversion.
-// This wrapper is used while code migrates away from using conversion (such as external clients) and in the future
-// will be unnecessary when we change the signature of NegotiatedSerializer.
-type WithoutConversionCodecFactory struct {
- CodecFactory
-}
-
-// EncoderForVersion returns an encoder that does not do conversion, but does set the group version kind of the object
-// when serialized.
-func (f WithoutConversionCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder {
- return runtime.WithVersionEncoder{
- Version: version,
- Encoder: serializer,
- ObjectTyper: f.CodecFactory.scheme,
- }
-}
-
-// DecoderToVersion returns a decoder that does not do conversion.
-func (f WithoutConversionCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder {
- return runtime.WithoutVersionDecoder{
- Decoder: serializer,
- }
-}
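
For reference, a minimal sketch of how the removed CodecFactory constructor and its option mutators are typically used (an empty runtime.Scheme here, purely for illustration):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

func main() {
	scheme := runtime.NewScheme()

	// Defaults: Pretty enabled, serializers non-strict.
	codecs := serializer.NewCodecFactory(scheme)

	// Mutators adjust the options before the factory is built; EnableStrict
	// makes duplicate or unknown fields surface as decode errors.
	strictCodecs := serializer.NewCodecFactory(scheme, serializer.EnableStrict)
	_ = strictCodecs

	// JSON, YAML, and protobuf serializers are registered by default.
	for _, info := range codecs.SupportedMediaTypes() {
		fmt.Println(info.MediaType)
	}
}
```
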
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
deleted file mode 100644
index 83b2e1393..000000000
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
+++ /dev/null
@@ -1,389 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package json
-
-import (
- "encoding/json"
- "io"
- "strconv"
- "unsafe"
-
- jsoniter "github.com/json-iterator/go"
- "github.com/modern-go/reflect2"
- "sigs.k8s.io/yaml"
-
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
- "k8s.io/apimachinery/pkg/util/framer"
- utilyaml "k8s.io/apimachinery/pkg/util/yaml"
- "k8s.io/klog/v2"
-)
-
-// NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer
-// is not nil, the object has the group, version, and kind fields set.
-// Deprecated: use NewSerializerWithOptions instead.
-func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer {
- return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false})
-}
-
-// NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer
-// is not nil, the object has the group, version, and kind fields set. This serializer supports only the subset of YAML that
-// matches JSON, and will error if constructs are used that do not serialize to JSON.
-// Deprecated: use NewSerializerWithOptions instead.
-func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
- return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false})
-}
-
-// NewSerializerWithOptions creates a JSON/YAML serializer that handles encoding versioned objects into the proper JSON/YAML
-// form. If typer is not nil, the object has the group, version, and kind fields set. Options are copied into the Serializer
-// and are immutable.
-func NewSerializerWithOptions(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, options SerializerOptions) *Serializer {
- return &Serializer{
- meta: meta,
- creater: creater,
- typer: typer,
- options: options,
- identifier: identifier(options),
- }
-}
-
-// identifier computes Identifier of Encoder based on the given options.
-func identifier(options SerializerOptions) runtime.Identifier {
- result := map[string]string{
- "name": "json",
- "yaml": strconv.FormatBool(options.Yaml),
- "pretty": strconv.FormatBool(options.Pretty),
- }
- identifier, err := json.Marshal(result)
- if err != nil {
- klog.Fatalf("Failed marshaling identifier for json Serializer: %v", err)
- }
- return runtime.Identifier(identifier)
-}
-
-// SerializerOptions holds the options which are used to configure a JSON/YAML serializer.
-// example:
-// (1) To configure a JSON serializer, set `Yaml` to `false`.
-// (2) To configure a YAML serializer, set `Yaml` to `true`.
-// (3) To configure a strict serializer that can return strictDecodingError, set `Strict` to `true`.
-type SerializerOptions struct {
- // Yaml: configures the Serializer to work with JSON(false) or YAML(true).
- // When `Yaml` is enabled, this serializer only supports the subset of YAML that
- // matches JSON, and will error if constructs are used that do not serialize to JSON.
- Yaml bool
-
- // Pretty: configures a JSON enabled Serializer(`Yaml: false`) to produce human-readable output.
- // This option is silently ignored when `Yaml` is `true`.
- Pretty bool
-
- // Strict: configures the Serializer to return strictDecodingError's when duplicate fields are present decoding JSON or YAML.
- // Note that enabling this option is not as performant as the non-strict variant, and should not be used in fast paths.
- Strict bool
-}
-
-// Serializer handles encoding versioned objects into the proper JSON form
-type Serializer struct {
- meta MetaFactory
- options SerializerOptions
- creater runtime.ObjectCreater
- typer runtime.ObjectTyper
-
- identifier runtime.Identifier
-}
-
-// Serializer implements Serializer
-var _ runtime.Serializer = &Serializer{}
-var _ recognizer.RecognizingDecoder = &Serializer{}
-
-type customNumberExtension struct {
- jsoniter.DummyExtension
-}
-
-func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder {
- if typ.String() == "interface {}" {
- return customNumberDecoder{}
- }
- return nil
-}
-
-type customNumberDecoder struct {
-}
-
-func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
- switch iter.WhatIsNext() {
- case jsoniter.NumberValue:
- var number jsoniter.Number
- iter.ReadVal(&number)
- i64, err := strconv.ParseInt(string(number), 10, 64)
- if err == nil {
- *(*interface{})(ptr) = i64
- return
- }
- f64, err := strconv.ParseFloat(string(number), 64)
- if err == nil {
- *(*interface{})(ptr) = f64
- return
- }
- iter.ReportError("DecodeNumber", err.Error())
- default:
- *(*interface{})(ptr) = iter.Read()
- }
-}
-
-// CaseSensitiveJSONIterator returns a jsoniterator API that's configured to be
-// case-sensitive when unmarshalling, and otherwise compatible with
-// the encoding/json standard library.
-func CaseSensitiveJSONIterator() jsoniter.API {
- config := jsoniter.Config{
- EscapeHTML: true,
- SortMapKeys: true,
- ValidateJsonRawMessage: true,
- CaseSensitive: true,
- }.Froze()
- // Force jsoniter to decode number to interface{} via int64/float64, if possible.
- config.RegisterExtension(&customNumberExtension{})
- return config
-}
-
-// StrictCaseSensitiveJSONIterator returns a jsoniterator API that's configured to be
-// case-sensitive, but also disallows unknown fields when unmarshalling. It is compatible with
-// the encoding/json standard library.
-func StrictCaseSensitiveJSONIterator() jsoniter.API {
- config := jsoniter.Config{
- EscapeHTML: true,
- SortMapKeys: true,
- ValidateJsonRawMessage: true,
- CaseSensitive: true,
- DisallowUnknownFields: true,
- }.Froze()
- // Force jsoniter to decode number to interface{} via int64/float64, if possible.
- config.RegisterExtension(&customNumberExtension{})
- return config
-}
-
-// Private copies of jsoniter to try to shield against possible mutations
-// from outside. Still does not protect from package level jsoniter.Register*() functions - someone calling them
-// in some other library will mess with every usage of the jsoniter library in the whole program.
-// See https://github.com/json-iterator/go/issues/265
-var caseSensitiveJSONIterator = CaseSensitiveJSONIterator()
-var strictCaseSensitiveJSONIterator = StrictCaseSensitiveJSONIterator()
-
-// gvkWithDefaults returns the group, version, and kind, filling in unset fields from the provided default.
-func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind {
- if len(actual.Kind) == 0 {
- actual.Kind = defaultGVK.Kind
- }
- if len(actual.Version) == 0 && len(actual.Group) == 0 {
- actual.Group = defaultGVK.Group
- actual.Version = defaultGVK.Version
- }
- if len(actual.Version) == 0 && actual.Group == defaultGVK.Group {
- actual.Version = defaultGVK.Version
- }
- return actual
-}
-
-// Decode attempts to convert the provided data into YAML or JSON, extract the stored schema kind, apply the provided default gvk, and then
-// load that data into an object matching the desired schema kind or the provided into.
-// If into is *runtime.Unknown, the raw data will be extracted and no decoding will be performed.
-// If into is not registered with the typer, then the object will be straight decoded using normal JSON/YAML unmarshalling.
-// If into is provided and the original data is not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk.
-// If into is nil, or the data's gvk differs from into's gvk, a new object is generated with ObjectCreater.New(gvk).
-// On success or most errors, the method will return the calculated schema kind.
-// The gvk calculation priority is originalData > default gvk > into.
-func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
- data := originalData
- if s.options.Yaml {
- altered, err := yaml.YAMLToJSON(data)
- if err != nil {
- return nil, nil, err
- }
- data = altered
- }
-
- actual, err := s.meta.Interpret(data)
- if err != nil {
- return nil, nil, err
- }
-
- if gvk != nil {
- *actual = gvkWithDefaults(*actual, *gvk)
- }
-
- if unk, ok := into.(*runtime.Unknown); ok && unk != nil {
- unk.Raw = originalData
- unk.ContentType = runtime.ContentTypeJSON
- unk.GetObjectKind().SetGroupVersionKind(*actual)
- return unk, actual, nil
- }
-
- if into != nil {
- _, isUnstructured := into.(runtime.Unstructured)
- types, _, err := s.typer.ObjectKinds(into)
- switch {
- case runtime.IsNotRegisteredError(err), isUnstructured:
- if err := caseSensitiveJSONIterator.Unmarshal(data, into); err != nil {
- return nil, actual, err
- }
- return into, actual, nil
- case err != nil:
- return nil, actual, err
- default:
- *actual = gvkWithDefaults(*actual, types[0])
- }
- }
-
- if len(actual.Kind) == 0 {
- return nil, actual, runtime.NewMissingKindErr(string(originalData))
- }
- if len(actual.Version) == 0 {
- return nil, actual, runtime.NewMissingVersionErr(string(originalData))
- }
-
- // use the target if necessary
- obj, err := runtime.UseOrCreateObject(s.typer, s.creater, *actual, into)
- if err != nil {
- return nil, actual, err
- }
-
- if err := caseSensitiveJSONIterator.Unmarshal(data, obj); err != nil {
- return nil, actual, err
- }
-
- // If the deserializer is non-strict, return successfully here.
- if !s.options.Strict {
- return obj, actual, nil
- }
-
-	// In strict mode pass the data through the YAMLToJSONStrict converter.
- // This is done to catch duplicate fields regardless of encoding (JSON or YAML). For JSON data,
- // the output would equal the input, unless there is a parsing error such as duplicate fields.
- // As we know this was successful in the non-strict case, the only error that may be returned here
-	// is because of the newly-added strictness. Hence we know we can return the typed strictDecodingError;
-	// the actual error is that the object contains duplicate fields.
- altered, err := yaml.YAMLToJSONStrict(originalData)
- if err != nil {
- return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData))
- }
-	// As performance is not an issue for now for the strict deserializer (one has to do the unmarshal
-	// twice regardless), we take the sanitized, altered data that is guaranteed to have no duplicated
-	// fields, and unmarshal this into a copy of the already-populated obj. Any error that occurs here is
-	// because a matching field does not exist in the object. Hence we can return a typed strictDecodingError;
-	// the actual error is that the object contains an unknown field.
- strictObj := obj.DeepCopyObject()
- if err := strictCaseSensitiveJSONIterator.Unmarshal(altered, strictObj); err != nil {
- return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData))
- }
- // Always return the same object as the non-strict serializer to avoid any deviations.
- return obj, actual, nil
-}
-
-// Encode serializes the provided object to the given writer.
-func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
- if co, ok := obj.(runtime.CacheableObject); ok {
- return co.CacheEncode(s.Identifier(), s.doEncode, w)
- }
- return s.doEncode(obj, w)
-}
-
-func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
- if s.options.Yaml {
- json, err := caseSensitiveJSONIterator.Marshal(obj)
- if err != nil {
- return err
- }
- data, err := yaml.JSONToYAML(json)
- if err != nil {
- return err
- }
- _, err = w.Write(data)
- return err
- }
-
- if s.options.Pretty {
- data, err := caseSensitiveJSONIterator.MarshalIndent(obj, "", " ")
- if err != nil {
- return err
- }
- _, err = w.Write(data)
- return err
- }
- encoder := json.NewEncoder(w)
- return encoder.Encode(obj)
-}
-
-// Identifier implements runtime.Encoder interface.
-func (s *Serializer) Identifier() runtime.Identifier {
- return s.identifier
-}
-
-// RecognizesData implements the RecognizingDecoder interface.
-func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) {
- if s.options.Yaml {
- // we could potentially look for '---'
- return false, true, nil
- }
- _, _, ok = utilyaml.GuessJSONStream(peek, 2048)
- return ok, false, nil
-}
-
-// Framer is the default JSON framing behavior, with newlines delimiting individual objects.
-var Framer = jsonFramer{}
-
-type jsonFramer struct{}
-
-// NewFrameWriter implements stream framing for this serializer
-func (jsonFramer) NewFrameWriter(w io.Writer) io.Writer {
- // we can write JSON objects directly to the writer, because they are self-framing
- return w
-}
-
-// NewFrameReader implements stream framing for this serializer
-func (jsonFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser {
- // we need to extract the JSON chunks of data to pass to Decode()
- return framer.NewJSONFramedReader(r)
-}
-
-// YAMLFramer is the default YAML framing behavior, with the YAML document separator ("---") delimiting individual objects.
-var YAMLFramer = yamlFramer{}
-
-type yamlFramer struct{}
-
-// NewFrameWriter implements stream framing for this serializer
-func (yamlFramer) NewFrameWriter(w io.Writer) io.Writer {
- return yamlFrameWriter{w}
-}
-
-// NewFrameReader implements stream framing for this serializer
-func (yamlFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser {
- // extract the YAML document chunks directly
- return utilyaml.NewDocumentDecoder(r)
-}
-
-type yamlFrameWriter struct {
- w io.Writer
-}
-
-// Write separates each document with the YAML document separator (`---` followed by line
-// break). Writers must write well formed YAML documents (include a final line break).
-func (w yamlFrameWriter) Write(data []byte) (n int, err error) {
- if _, err := w.w.Write([]byte("---\n")); err != nil {
- return 0, err
- }
- return w.w.Write(data)
-}
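
A hedged sketch of the SerializerOptions described above: constructing a YAML-flavoured serializer and encoding an unstructured object with runtime.Encode (the ConfigMap content is illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
)

func main() {
	scheme := runtime.NewScheme()

	// Yaml: true selects the YAML flavour; Strict: true would additionally
	// reject duplicate or unknown fields while decoding.
	s := json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme,
		json.SerializerOptions{Yaml: true, Pretty: false, Strict: false})

	u := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]interface{}{"name": "demo"},
	}}

	out, err := runtime.Encode(s, u)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // a single YAML document for the ConfigMap
}
```
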
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go
deleted file mode 100644
index df3f5f989..000000000
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package json
-
-import (
- "encoding/json"
- "fmt"
-
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// MetaFactory is used to store and retrieve the version and kind
-// information for JSON objects in a serializer.
-type MetaFactory interface {
- // Interpret should return the version and kind of the wire-format of
- // the object.
- Interpret(data []byte) (*schema.GroupVersionKind, error)
-}
-
-// DefaultMetaFactory is a default factory for versioning objects in JSON. The object
-// in memory and in the default JSON serialization will use the "kind" and "apiVersion"
-// fields.
-var DefaultMetaFactory = SimpleMetaFactory{}
-
-// SimpleMetaFactory provides default methods for retrieving the type and version of objects
-// that are identified with "apiVersion" and "kind" fields in their JSON
-// serialization. It may be parameterized with the names of the fields in memory, or an
-// optional list of base structs to search for those fields in memory.
-type SimpleMetaFactory struct {
-}
-
-// Interpret will return the APIVersion and Kind of the JSON wire-format
-// encoding of an object, or an error.
-func (SimpleMetaFactory) Interpret(data []byte) (*schema.GroupVersionKind, error) {
- findKind := struct {
- // +optional
- APIVersion string `json:"apiVersion,omitempty"`
- // +optional
- Kind string `json:"kind,omitempty"`
- }{}
- if err := json.Unmarshal(data, &findKind); err != nil {
- return nil, fmt.Errorf("couldn't get version/kind; json parse error: %v", err)
- }
- gv, err := schema.ParseGroupVersion(findKind.APIVersion)
- if err != nil {
- return nil, err
- }
- return &schema.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: findKind.Kind}, nil
-}
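
The MetaFactory above only peeks at the apiVersion/kind fields. A short sketch (the Job document is illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/serializer/json"
)

func main() {
	data := []byte(`{"apiVersion":"batch/v1","kind":"Job","metadata":{"name":"demo"}}`)

	// Interpret reads only apiVersion and kind; the rest of the document is ignored.
	gvk, err := json.DefaultMetaFactory.Interpret(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(gvk.Group, gvk.Version, gvk.Kind) // batch v1 Job
}
```
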
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go
deleted file mode 100644
index a42b4a41a..000000000
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package serializer
-
-import (
- "k8s.io/apimachinery/pkg/runtime"
-)
-
-// TODO: We should split negotiated serializers that we can change versions on from those we can change
-// serialization formats on
-type negotiatedSerializerWrapper struct {
- info runtime.SerializerInfo
-}
-
-func NegotiatedSerializerWrapper(info runtime.SerializerInfo) runtime.NegotiatedSerializer {
- return &negotiatedSerializerWrapper{info}
-}
-
-func (n *negotiatedSerializerWrapper) SupportedMediaTypes() []runtime.SerializerInfo {
- return []runtime.SerializerInfo{n.info}
-}
-
-func (n *negotiatedSerializerWrapper) EncoderForVersion(e runtime.Encoder, _ runtime.GroupVersioner) runtime.Encoder {
- return e
-}
-
-func (n *negotiatedSerializerWrapper) DecoderToVersion(d runtime.Decoder, _gv runtime.GroupVersioner) runtime.Decoder {
- return d
-}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go
deleted file mode 100644
index 72d0ac79b..000000000
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package protobuf provides a Kubernetes serializer for the protobuf format.
-package protobuf // import "k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
deleted file mode 100644
index 404fb1b7e..000000000
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
+++ /dev/null
@@ -1,476 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package protobuf
-
-import (
- "bytes"
- "fmt"
- "io"
- "net/http"
- "reflect"
-
- "github.com/gogo/protobuf/proto"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
- "k8s.io/apimachinery/pkg/util/framer"
-)
-
-var (
- // protoEncodingPrefix serves as a magic number for an encoded protobuf message on this serializer. All
- // proto messages serialized by this schema will be preceded by the bytes 0x6b 0x38 0x73, with the fourth
- // byte being reserved for the encoding style. The only encoding style defined is 0x00, which means that
- // the rest of the byte stream is a message of type k8s.io.kubernetes.pkg.runtime.Unknown (proto2).
- //
- // See k8s.io/apimachinery/pkg/runtime/generated.proto for details of the runtime.Unknown message.
- //
- // This encoding scheme is experimental, and is subject to change at any time.
- protoEncodingPrefix = []byte{0x6b, 0x38, 0x73, 0x00}
-)
-
-type errNotMarshalable struct {
- t reflect.Type
-}
-
-func (e errNotMarshalable) Error() string {
- return fmt.Sprintf("object %v does not implement the protobuf marshalling interface and cannot be encoded to a protobuf message", e.t)
-}
-
-func (e errNotMarshalable) Status() metav1.Status {
- return metav1.Status{
- Status: metav1.StatusFailure,
- Code: http.StatusNotAcceptable,
- Reason: metav1.StatusReason("NotAcceptable"),
- Message: e.Error(),
- }
-}
-
-// IsNotMarshalable checks the type of the error and returns true if the error is a non-nil errNotMarshalable, and false otherwise.
-func IsNotMarshalable(err error) bool {
- _, ok := err.(errNotMarshalable)
- return err != nil && ok
-}
-
-// NewSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer
-// is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written
-// as-is (any type info passed with the object will be used).
-func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
- return &Serializer{
- prefix: protoEncodingPrefix,
- creater: creater,
- typer: typer,
- }
-}
-
-// Serializer handles encoding versioned objects into the proper wire form
-type Serializer struct {
- prefix []byte
- creater runtime.ObjectCreater
- typer runtime.ObjectTyper
-}
-
-var _ runtime.Serializer = &Serializer{}
-var _ recognizer.RecognizingDecoder = &Serializer{}
-
-const serializerIdentifier runtime.Identifier = "protobuf"
-
-// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default
-// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown,
-// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will
-// be straight decoded using normal protobuf unmarshalling (the MarshalTo interface). If into is provided and the original data is
-// not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most
-// errors, the method will return the calculated schema kind.
-func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
- prefixLen := len(s.prefix)
- switch {
- case len(originalData) == 0:
- // TODO: treat like decoding {} from JSON with defaulting
- return nil, nil, fmt.Errorf("empty data")
- case len(originalData) < prefixLen || !bytes.Equal(s.prefix, originalData[:prefixLen]):
- return nil, nil, fmt.Errorf("provided data does not appear to be a protobuf message, expected prefix %v", s.prefix)
- case len(originalData) == prefixLen:
- // TODO: treat like decoding {} from JSON with defaulting
- return nil, nil, fmt.Errorf("empty body")
- }
-
- data := originalData[prefixLen:]
- unk := runtime.Unknown{}
- if err := unk.Unmarshal(data); err != nil {
- return nil, nil, err
- }
-
- actual := unk.GroupVersionKind()
- copyKindDefaults(&actual, gvk)
-
- if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil {
- *intoUnknown = unk
- if ok, _, _ := s.RecognizesData(bytes.NewBuffer(unk.Raw)); ok {
- intoUnknown.ContentType = runtime.ContentTypeProtobuf
- }
- return intoUnknown, &actual, nil
- }
-
- if into != nil {
- types, _, err := s.typer.ObjectKinds(into)
- switch {
- case runtime.IsNotRegisteredError(err):
- pb, ok := into.(proto.Message)
- if !ok {
- return nil, &actual, errNotMarshalable{reflect.TypeOf(into)}
- }
- if err := proto.Unmarshal(unk.Raw, pb); err != nil {
- return nil, &actual, err
- }
- return into, &actual, nil
- case err != nil:
- return nil, &actual, err
- default:
- copyKindDefaults(&actual, &types[0])
- // if the result of defaulting did not set a version or group, ensure that at least group is set
- // (copyKindDefaults will not assign Group if version is already set). This guarantees that the group
- // of into is set if there is no better information from the caller or object.
- if len(actual.Version) == 0 && len(actual.Group) == 0 {
- actual.Group = types[0].Group
- }
- }
- }
-
- if len(actual.Kind) == 0 {
- return nil, &actual, runtime.NewMissingKindErr(fmt.Sprintf("%#v", unk.TypeMeta))
- }
- if len(actual.Version) == 0 {
- return nil, &actual, runtime.NewMissingVersionErr(fmt.Sprintf("%#v", unk.TypeMeta))
- }
-
- return unmarshalToObject(s.typer, s.creater, &actual, into, unk.Raw)
-}
-
-// Encode serializes the provided object to the given writer.
-func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
- if co, ok := obj.(runtime.CacheableObject); ok {
- return co.CacheEncode(s.Identifier(), s.doEncode, w)
- }
- return s.doEncode(obj, w)
-}
-
-func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
- prefixSize := uint64(len(s.prefix))
-
- var unk runtime.Unknown
- switch t := obj.(type) {
- case *runtime.Unknown:
- estimatedSize := prefixSize + uint64(t.Size())
- data := make([]byte, estimatedSize)
- i, err := t.MarshalTo(data[prefixSize:])
- if err != nil {
- return err
- }
- copy(data, s.prefix)
- _, err = w.Write(data[:prefixSize+uint64(i)])
- return err
- default:
- kind := obj.GetObjectKind().GroupVersionKind()
- unk = runtime.Unknown{
- TypeMeta: runtime.TypeMeta{
- Kind: kind.Kind,
- APIVersion: kind.GroupVersion().String(),
- },
- }
- }
-
- switch t := obj.(type) {
- case bufferedMarshaller:
- // this path performs a single allocation during write but requires the caller to implement
- // the more efficient Size and MarshalToSizedBuffer methods
- encodedSize := uint64(t.Size())
- estimatedSize := prefixSize + estimateUnknownSize(&unk, encodedSize)
- data := make([]byte, estimatedSize)
-
- i, err := unk.NestedMarshalTo(data[prefixSize:], t, encodedSize)
- if err != nil {
- return err
- }
-
- copy(data, s.prefix)
-
- _, err = w.Write(data[:prefixSize+uint64(i)])
- return err
-
- case proto.Marshaler:
- // this path performs extra allocations
- data, err := t.Marshal()
- if err != nil {
- return err
- }
- unk.Raw = data
-
- estimatedSize := prefixSize + uint64(unk.Size())
- data = make([]byte, estimatedSize)
-
- i, err := unk.MarshalTo(data[prefixSize:])
- if err != nil {
- return err
- }
-
- copy(data, s.prefix)
-
- _, err = w.Write(data[:prefixSize+uint64(i)])
- return err
-
- default:
- // TODO: marshal with a different content type and serializer (JSON for third party objects)
- return errNotMarshalable{reflect.TypeOf(obj)}
- }
-}
-
-// Identifier implements runtime.Encoder interface.
-func (s *Serializer) Identifier() runtime.Identifier {
- return serializerIdentifier
-}
-
-// RecognizesData implements the RecognizingDecoder interface.
-func (s *Serializer) RecognizesData(peek io.Reader) (bool, bool, error) {
- prefix := make([]byte, 4)
- n, err := peek.Read(prefix)
- if err != nil {
- if err == io.EOF {
- return false, false, nil
- }
- return false, false, err
- }
- if n != 4 {
- return false, false, nil
- }
- return bytes.Equal(s.prefix, prefix), false, nil
-}
-
-// copyKindDefaults defaults dst to the value in src if dst does not have a value set.
-func copyKindDefaults(dst, src *schema.GroupVersionKind) {
- if src == nil {
- return
- }
- // apply kind and version defaulting from provided default
- if len(dst.Kind) == 0 {
- dst.Kind = src.Kind
- }
- if len(dst.Version) == 0 && len(src.Version) > 0 {
- dst.Group = src.Group
- dst.Version = src.Version
- }
-}
-
-// bufferedMarshaller describes a more efficient marshalling interface that can avoid allocating multiple
-// byte buffers by pre-calculating the size of the final buffer needed.
-type bufferedMarshaller interface {
- proto.Sizer
- runtime.ProtobufMarshaller
-}
-
-// Like bufferedMarshaller, but is able to marshal backwards, which is more efficient since it doesn't call Size() as frequently.
-type bufferedReverseMarshaller interface {
- proto.Sizer
- runtime.ProtobufReverseMarshaller
-}
-
-// estimateUnknownSize returns the expected bytes consumed by a given runtime.Unknown
-// object with a nil RawJSON struct and the expected size of the provided buffer. The
-// returned size will not be correct if RawJSON is set on unk.
-func estimateUnknownSize(unk *runtime.Unknown, byteSize uint64) uint64 {
- size := uint64(unk.Size())
- // protobuf uses 1 byte for the tag, a varint for the length of the array (at most 8 bytes - uint64 - here),
- // and the size of the array.
- size += 1 + 8 + byteSize
- return size
-}
-
-// NewRawSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If typer
-// is not nil, the object has the group, version, and kind fields set. This serializer does not provide type information for the
-// encoded object, and thus is not self describing (callers must know what type is being described in order to decode).
-//
-// This encoding scheme is experimental, and is subject to change at any time.
-func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *RawSerializer {
- return &RawSerializer{
- creater: creater,
- typer: typer,
- }
-}
-
-// RawSerializer encodes and decodes objects without adding a runtime.Unknown wrapper (objects are encoded without identifying
-// type).
-type RawSerializer struct {
- creater runtime.ObjectCreater
- typer runtime.ObjectTyper
-}
-
-var _ runtime.Serializer = &RawSerializer{}
-
-const rawSerializerIdentifier runtime.Identifier = "raw-protobuf"
-
-// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default
-// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown,
-// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will
-// be straight decoded using normal protobuf unmarshalling (the MarshalTo interface). If into is provided and the original data is
-// not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most
-// errors, the method will return the calculated schema kind.
-func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
- if into == nil {
- return nil, nil, fmt.Errorf("this serializer requires an object to decode into: %#v", s)
- }
-
- if len(originalData) == 0 {
- // TODO: treat like decoding {} from JSON with defaulting
- return nil, nil, fmt.Errorf("empty data")
- }
- data := originalData
-
- actual := &schema.GroupVersionKind{}
- copyKindDefaults(actual, gvk)
-
- if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil {
- intoUnknown.Raw = data
- intoUnknown.ContentEncoding = ""
- intoUnknown.ContentType = runtime.ContentTypeProtobuf
- intoUnknown.SetGroupVersionKind(*actual)
- return intoUnknown, actual, nil
- }
-
- types, _, err := s.typer.ObjectKinds(into)
- switch {
- case runtime.IsNotRegisteredError(err):
- pb, ok := into.(proto.Message)
- if !ok {
- return nil, actual, errNotMarshalable{reflect.TypeOf(into)}
- }
- if err := proto.Unmarshal(data, pb); err != nil {
- return nil, actual, err
- }
- return into, actual, nil
- case err != nil:
- return nil, actual, err
- default:
- copyKindDefaults(actual, &types[0])
- // if the result of defaulting did not set a version or group, ensure that at least group is set
- // (copyKindDefaults will not assign Group if version is already set). This guarantees that the group
- // of into is set if there is no better information from the caller or object.
- if len(actual.Version) == 0 && len(actual.Group) == 0 {
- actual.Group = types[0].Group
- }
- }
-
- if len(actual.Kind) == 0 {
- return nil, actual, runtime.NewMissingKindErr("<protobuf encoded body - must provide default type>")
- }
- if len(actual.Version) == 0 {
- return nil, actual, runtime.NewMissingVersionErr("<protobuf encoded body - must provide default type>")
- }
-
- return unmarshalToObject(s.typer, s.creater, actual, into, data)
-}
-
-// unmarshalToObject is the common code between decode in the raw and normal serializer.
-func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, actual *schema.GroupVersionKind, into runtime.Object, data []byte) (runtime.Object, *schema.GroupVersionKind, error) {
- // use the target if necessary
- obj, err := runtime.UseOrCreateObject(typer, creater, *actual, into)
- if err != nil {
- return nil, actual, err
- }
-
- pb, ok := obj.(proto.Message)
- if !ok {
- return nil, actual, errNotMarshalable{reflect.TypeOf(obj)}
- }
- if err := proto.Unmarshal(data, pb); err != nil {
- return nil, actual, err
- }
- if actual != nil {
- obj.GetObjectKind().SetGroupVersionKind(*actual)
- }
- return obj, actual, nil
-}
-
-// Encode serializes the provided object to the given writer.
-func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error {
- if co, ok := obj.(runtime.CacheableObject); ok {
- return co.CacheEncode(s.Identifier(), s.doEncode, w)
- }
- return s.doEncode(obj, w)
-}
-
-func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer) error {
- switch t := obj.(type) {
- case bufferedReverseMarshaller:
- // this path performs a single allocation during write but requires the caller to implement
- // the more efficient Size and MarshalToSizedBuffer methods
- encodedSize := uint64(t.Size())
- data := make([]byte, encodedSize)
-
- n, err := t.MarshalToSizedBuffer(data)
- if err != nil {
- return err
- }
- _, err = w.Write(data[:n])
- return err
-
- case bufferedMarshaller:
- // this path performs a single allocation during write but requires the caller to implement
- // the more efficient Size and MarshalTo methods
- encodedSize := uint64(t.Size())
- data := make([]byte, encodedSize)
-
- n, err := t.MarshalTo(data)
- if err != nil {
- return err
- }
- _, err = w.Write(data[:n])
- return err
-
- case proto.Marshaler:
- // this path performs extra allocations
- data, err := t.Marshal()
- if err != nil {
- return err
- }
- _, err = w.Write(data)
- return err
-
- default:
- return errNotMarshalable{reflect.TypeOf(obj)}
- }
-}
-
-// Identifier implements runtime.Encoder interface.
-func (s *RawSerializer) Identifier() runtime.Identifier {
- return rawSerializerIdentifier
-}
-
-// LengthDelimitedFramer is exported variable of type lengthDelimitedFramer
-var LengthDelimitedFramer = lengthDelimitedFramer{}
-
-// lengthDelimitedFramer provides length-delimited frame reader and writer methods.
-type lengthDelimitedFramer struct{}
-
-// NewFrameWriter implements stream framing for this serializer
-func (lengthDelimitedFramer) NewFrameWriter(w io.Writer) io.Writer {
- return framer.NewLengthDelimitedFrameWriter(w)
-}
-
-// NewFrameReader implements stream framing for this serializer
-func (lengthDelimitedFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser {
- return framer.NewLengthDelimitedFrameReader(r)
-}
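
As the comments above explain, Kubernetes protobuf payloads carry the magic prefix 0x6b 0x38 0x73 0x00. A minimal sketch against the removed serializer (signatures as in this vendored copy; the trailing payload bytes are dummies):

```go
package main

import (
	"bytes"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
)

func main() {
	scheme := runtime.NewScheme()
	s := protobuf.NewSerializer(scheme, scheme)

	// "k8s" plus a zero encoding-style byte, followed by arbitrary message bytes.
	payload := []byte{0x6b, 0x38, 0x73, 0x00, 0x0a, 0x00}

	ok, _, err := s.RecognizesData(bytes.NewReader(payload))
	fmt.Println(ok, err) // true <nil>
}
```
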
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go
deleted file mode 100644
index 38497ab53..000000000
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package recognizer
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
-
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-type RecognizingDecoder interface {
- runtime.Decoder
- // RecognizesData should return true if the input provided in the provided reader
- // belongs to this decoder, or an error if the data could not be read or is ambiguous.
- // Unknown is true if the data could not be determined to match the decoder type.
- // Decoders should assume that they can read as much of peek as they need (as the caller
- // provides) and may return unknown if the data provided is not sufficient to make a
-	// determination. When peek returns EOF that may mean the end of the input or the
- // end of buffered input - recognizers should return the best guess at that time.
- RecognizesData(peek io.Reader) (ok, unknown bool, err error)
-}
-
-// NewDecoder creates a decoder that will attempt multiple decoders in an order defined
-// by:
-//
-// 1. The decoder implements RecognizingDecoder and identifies the data
-// 2. All other decoders, and any decoder that returned true for unknown.
-//
-// The order passed to the constructor is preserved within those priorities.
-func NewDecoder(decoders ...runtime.Decoder) runtime.Decoder {
- return &decoder{
- decoders: decoders,
- }
-}
-
-type decoder struct {
- decoders []runtime.Decoder
-}
-
-var _ RecognizingDecoder = &decoder{}
-
-func (d *decoder) RecognizesData(peek io.Reader) (bool, bool, error) {
- var (
- lastErr error
- anyUnknown bool
- )
- data, _ := bufio.NewReaderSize(peek, 1024).Peek(1024)
- for _, r := range d.decoders {
- switch t := r.(type) {
- case RecognizingDecoder:
- ok, unknown, err := t.RecognizesData(bytes.NewBuffer(data))
- if err != nil {
- lastErr = err
- continue
- }
- anyUnknown = anyUnknown || unknown
- if !ok {
- continue
- }
- return true, false, nil
- }
- }
- return false, anyUnknown, lastErr
-}
-
-func (d *decoder) Decode(data []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
- var (
- lastErr error
- skipped []runtime.Decoder
- )
-
- // try recognizers, record any decoders we need to give a chance later
- for _, r := range d.decoders {
- switch t := r.(type) {
- case RecognizingDecoder:
- buf := bytes.NewBuffer(data)
- ok, unknown, err := t.RecognizesData(buf)
- if err != nil {
- lastErr = err
- continue
- }
- if unknown {
- skipped = append(skipped, t)
- continue
- }
- if !ok {
- continue
- }
- return r.Decode(data, gvk, into)
- default:
- skipped = append(skipped, t)
- }
- }
-
- // try recognizers that returned unknown or didn't recognize their data
- for _, r := range skipped {
- out, actual, err := r.Decode(data, gvk, into)
- if err != nil {
- lastErr = err
- continue
- }
- return out, actual, nil
- }
-
- if lastErr == nil {
- lastErr = fmt.Errorf("no serialization format matched the provided data")
- }
- return nil, nil, lastErr
-}
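
A sketch of the recognizing decoder described above, combining the JSON and protobuf serializers from the sibling packages in this same removed vendor tree so that JSON input is routed to the JSON serializer (object contents illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
	"k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
	"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
)

func main() {
	scheme := runtime.NewScheme()
	jsonSerializer := json.NewSerializerWithOptions(
		json.DefaultMetaFactory, scheme, scheme, json.SerializerOptions{})
	protoSerializer := protobuf.NewSerializer(scheme, scheme)

	// RecognizesData is consulted first, so the JSON serializer wins for JSON input.
	dec := recognizer.NewDecoder(jsonSerializer, protoSerializer)

	data := []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"demo"}}`)
	obj, gvk, err := dec.Decode(data, nil, &unstructured.Unstructured{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvk.Kind, obj.(*unstructured.Unstructured).GetName()) // ConfigMap demo
}
```
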
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
deleted file mode 100644
index a60a7c041..000000000
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package streaming implements encoder and decoder for streams
-// of runtime.Objects over io.Writer/Readers.
-package streaming
-
-import (
- "bytes"
- "fmt"
- "io"
-
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// Encoder is a runtime.Encoder on a stream.
-type Encoder interface {
- // Encode will write the provided object to the stream or return an error. It obeys the same
- // contract as runtime.VersionedEncoder.
- Encode(obj runtime.Object) error
-}
-
-// Decoder is a runtime.Decoder from a stream.
-type Decoder interface {
- // Decode will return io.EOF when no more objects are available.
- Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error)
- // Close closes the underlying stream.
- Close() error
-}
-
-// Serializer is a factory for creating encoders and decoders that work over streams.
-type Serializer interface {
- NewEncoder(w io.Writer) Encoder
- NewDecoder(r io.ReadCloser) Decoder
-}
-
-type decoder struct {
- reader io.ReadCloser
- decoder runtime.Decoder
- buf []byte
- maxBytes int
- resetRead bool
-}
-
-// NewDecoder creates a streaming decoder that reads object chunks from r and decodes them with d.
-// The reader is expected to return io.ErrShortBuffer if the provided buffer is not large enough to read
-// an entire object.
-func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder {
- return &decoder{
- reader: r,
- decoder: d,
- buf: make([]byte, 1024),
- maxBytes: 16 * 1024 * 1024,
- }
-}
-
-var ErrObjectTooLarge = fmt.Errorf("object to decode was longer than maximum allowed size")
-
-// Decode reads the next object from the stream and decodes it.
-func (d *decoder) Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
- base := 0
- for {
- n, err := d.reader.Read(d.buf[base:])
- if err == io.ErrShortBuffer {
- if n == 0 {
- return nil, nil, fmt.Errorf("got short buffer with n=0, base=%d, cap=%d", base, cap(d.buf))
- }
- if d.resetRead {
- continue
- }
- // double the buffer size up to maxBytes
- if len(d.buf) < d.maxBytes {
- base += n
- d.buf = append(d.buf, make([]byte, len(d.buf))...)
- continue
- }
- // must read the rest of the frame (until we stop getting ErrShortBuffer)
- d.resetRead = true
- base = 0
- return nil, nil, ErrObjectTooLarge
- }
- if err != nil {
- return nil, nil, err
- }
- if d.resetRead {
- // now that we have drained the large read, continue
- d.resetRead = false
- continue
- }
- base += n
- break
- }
- return d.decoder.Decode(d.buf[:base], defaults, into)
-}
-
-func (d *decoder) Close() error {
- return d.reader.Close()
-}
-
-type encoder struct {
- writer io.Writer
- encoder runtime.Encoder
- buf *bytes.Buffer
-}
-
-// NewEncoder returns a new streaming encoder.
-func NewEncoder(w io.Writer, e runtime.Encoder) Encoder {
- return &encoder{
- writer: w,
- encoder: e,
- buf: &bytes.Buffer{},
- }
-}
-
-// Encode writes the provided object to the nested writer.
-func (e *encoder) Encode(obj runtime.Object) error {
- if err := e.encoder.Encode(obj, e.buf); err != nil {
- return err
- }
- _, err := e.writer.Write(e.buf.Bytes())
- e.buf.Reset()
- return err
-}
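
A hedged sketch of the streaming decoder above, fed by the JSON framer from the json package removed earlier in this diff; it decodes a newline-delimited stream one object at a time (document contents illustrative):

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
	"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
)

func main() {
	scheme := runtime.NewScheme()
	jsonSerializer := json.NewSerializerWithOptions(
		json.DefaultMetaFactory, scheme, scheme, json.SerializerOptions{})

	// Two newline-delimited JSON documents; the framer hands them to the
	// streaming decoder one frame at a time.
	stream := io.NopCloser(strings.NewReader(
		`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"a"}}` + "\n" +
			`{"apiVersion":"v1","kind":"Secret","metadata":{"name":"b"}}` + "\n"))

	dec := streaming.NewDecoder(json.Framer.NewFrameReader(stream), jsonSerializer)
	defer dec.Close()

	for {
		obj, _, err := dec.Decode(nil, &unstructured.Unstructured{})
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		u := obj.(*unstructured.Unstructured)
		fmt.Println(u.GetKind(), u.GetName()) // ConfigMap a, then Secret b
	}
}
```
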
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
deleted file mode 100644
index 718c5dfb7..000000000
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package versioning
-
-import (
- "encoding/json"
- "io"
- "reflect"
- "sync"
-
- "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/klog/v2"
-)
-
-// NewDefaultingCodecForScheme is a convenience method for callers that are using a scheme.
-func NewDefaultingCodecForScheme(
- // TODO: I should be a scheme interface?
- scheme *runtime.Scheme,
- encoder runtime.Encoder,
- decoder runtime.Decoder,
- encodeVersion runtime.GroupVersioner,
- decodeVersion runtime.GroupVersioner,
-) runtime.Codec {
- return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, encodeVersion, decodeVersion, scheme.Name())
-}
-
-// NewCodec takes objects in their internal versions and converts them to external versions before
-// serializing them. It assumes the serializer provided to it only deals with external versions.
-// This class is also a serializer, but is generally used with a specific version.
-func NewCodec(
- encoder runtime.Encoder,
- decoder runtime.Decoder,
- convertor runtime.ObjectConvertor,
- creater runtime.ObjectCreater,
- typer runtime.ObjectTyper,
- defaulter runtime.ObjectDefaulter,
- encodeVersion runtime.GroupVersioner,
- decodeVersion runtime.GroupVersioner,
- originalSchemeName string,
-) runtime.Codec {
- internal := &codec{
- encoder: encoder,
- decoder: decoder,
- convertor: convertor,
- creater: creater,
- typer: typer,
- defaulter: defaulter,
-
- encodeVersion: encodeVersion,
- decodeVersion: decodeVersion,
-
- identifier: identifier(encodeVersion, encoder),
-
- originalSchemeName: originalSchemeName,
- }
- return internal
-}
-
-type codec struct {
- encoder runtime.Encoder
- decoder runtime.Decoder
- convertor runtime.ObjectConvertor
- creater runtime.ObjectCreater
- typer runtime.ObjectTyper
- defaulter runtime.ObjectDefaulter
-
- encodeVersion runtime.GroupVersioner
- decodeVersion runtime.GroupVersioner
-
- identifier runtime.Identifier
-
- // originalSchemeName is optional, but when filled in it holds the name of the scheme from which this codec originates
- originalSchemeName string
-}
-
-var identifiersMap sync.Map
-
-type codecIdentifier struct {
- EncodeGV string `json:"encodeGV,omitempty"`
- Encoder string `json:"encoder,omitempty"`
- Name string `json:"name,omitempty"`
-}
-
-// identifier computes Identifier of Encoder based on codec parameters.
-func identifier(encodeGV runtime.GroupVersioner, encoder runtime.Encoder) runtime.Identifier {
- result := codecIdentifier{
- Name: "versioning",
- }
-
- if encodeGV != nil {
- result.EncodeGV = encodeGV.Identifier()
- }
- if encoder != nil {
- result.Encoder = string(encoder.Identifier())
- }
- if id, ok := identifiersMap.Load(result); ok {
- return id.(runtime.Identifier)
- }
- identifier, err := json.Marshal(result)
- if err != nil {
- klog.Fatalf("Failed marshaling identifier for codec: %v", err)
- }
- identifiersMap.Store(result, runtime.Identifier(identifier))
- return runtime.Identifier(identifier)
-}
-
-// Decode attempts a decode of the object, then tries to convert it to the internal version. If into is provided and the decoding is
-// successful, the returned runtime.Object will be the value passed as into. Note that this may bypass conversion if you pass an
-// into that matches the serialized version.
-func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
- // If the into object is unstructured and expresses an opinion about its group/version,
- // create a new instance of the type so we always exercise the conversion path (skips short-circuiting on `into == obj`)
- decodeInto := into
- if into != nil {
- if _, ok := into.(runtime.Unstructured); ok && !into.GetObjectKind().GroupVersionKind().GroupVersion().Empty() {
- decodeInto = reflect.New(reflect.TypeOf(into).Elem()).Interface().(runtime.Object)
- }
- }
-
- obj, gvk, err := c.decoder.Decode(data, defaultGVK, decodeInto)
- if err != nil {
- return nil, gvk, err
- }
-
- if d, ok := obj.(runtime.NestedObjectDecoder); ok {
- if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil {
- return nil, gvk, err
- }
- }
-
- // if we specify a target, use generic conversion.
- if into != nil {
- // perform defaulting if requested
- if c.defaulter != nil {
- c.defaulter.Default(obj)
- }
-
- // Short-circuit conversion if the into object is same object
- if into == obj {
- return into, gvk, nil
- }
-
- if err := c.convertor.Convert(obj, into, c.decodeVersion); err != nil {
- return nil, gvk, err
- }
-
- return into, gvk, nil
- }
-
- // perform defaulting if requested
- if c.defaulter != nil {
- c.defaulter.Default(obj)
- }
-
- out, err := c.convertor.ConvertToVersion(obj, c.decodeVersion)
- if err != nil {
- return nil, gvk, err
- }
- return out, gvk, nil
-}
-
-// Encode ensures the provided object is output in the appropriate group and version, invoking
-// conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is.
-func (c *codec) Encode(obj runtime.Object, w io.Writer) error {
- if co, ok := obj.(runtime.CacheableObject); ok {
- return co.CacheEncode(c.Identifier(), c.doEncode, w)
- }
- return c.doEncode(obj, w)
-}
-
-func (c *codec) doEncode(obj runtime.Object, w io.Writer) error {
- switch obj := obj.(type) {
- case *runtime.Unknown:
- return c.encoder.Encode(obj, w)
- case runtime.Unstructured:
- // An unstructured list can contain objects of multiple group version kinds. don't short-circuit just
- // because the top-level type matches our desired destination type. actually send the object to the converter
- // to give it a chance to convert the list items if needed.
- if _, ok := obj.(*unstructured.UnstructuredList); !ok {
- // avoid conversion roundtrip if GVK is the right one already or is empty (yes, this is a hack, but the old behaviour we rely on in kubectl)
- objGVK := obj.GetObjectKind().GroupVersionKind()
- if len(objGVK.Version) == 0 {
- return c.encoder.Encode(obj, w)
- }
- targetGVK, ok := c.encodeVersion.KindForGroupVersionKinds([]schema.GroupVersionKind{objGVK})
- if !ok {
- return runtime.NewNotRegisteredGVKErrForTarget(c.originalSchemeName, objGVK, c.encodeVersion)
- }
- if targetGVK == objGVK {
- return c.encoder.Encode(obj, w)
- }
- }
- }
-
- gvks, isUnversioned, err := c.typer.ObjectKinds(obj)
- if err != nil {
- return err
- }
-
- objectKind := obj.GetObjectKind()
- old := objectKind.GroupVersionKind()
- // restore the old GVK after encoding
- defer objectKind.SetGroupVersionKind(old)
-
- if c.encodeVersion == nil || isUnversioned {
- if e, ok := obj.(runtime.NestedObjectEncoder); ok {
- if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil {
- return err
- }
- }
- objectKind.SetGroupVersionKind(gvks[0])
- return c.encoder.Encode(obj, w)
- }
-
- // Perform a conversion if necessary
- out, err := c.convertor.ConvertToVersion(obj, c.encodeVersion)
- if err != nil {
- return err
- }
-
- if e, ok := out.(runtime.NestedObjectEncoder); ok {
- if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil {
- return err
- }
- }
-
- // Conversion is responsible for setting the proper group, version, and kind onto the outgoing object
- return c.encoder.Encode(out, w)
-}
-
-// Identifier implements runtime.Encoder interface.
-func (c *codec) Identifier() runtime.Identifier {
- return c.identifier
-}
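
A rough sketch of how the versioning codec above was assembled by hand around a JSON serializer; the scheme, object, and target group/version are assumed to be supplied by the caller, and the helper name is illustrative.

package example

import (
	"bytes"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
	"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
)

// encodeVersioned builds a defaulting codec around a JSON serializer and
// encodes obj to the requested group/version. The scheme is assumed to
// already have the object's types registered.
func encodeVersioned(scheme *runtime.Scheme, obj runtime.Object, gv schema.GroupVersion) ([]byte, error) {
	jsonSerializer := json.NewSerializer(json.DefaultMetaFactory, scheme, scheme, false)
	codec := versioning.NewDefaultingCodecForScheme(scheme, jsonSerializer, jsonSerializer, gv, gv)

	var buf bytes.Buffer
	if err := codec.Encode(obj, &buf); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
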
diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
deleted file mode 100644
index 3e1e2517b..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
+++ /dev/null
@@ -1,407 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package clock
-
-import (
- "sync"
- "time"
-)
-
-// PassiveClock allows for injecting fake or real clocks into code
-// that needs to read the current time but does not support scheduling
-// activity in the future.
-type PassiveClock interface {
- Now() time.Time
- Since(time.Time) time.Duration
-}
-
-// Clock allows for injecting fake or real clocks into code that
-// needs to do arbitrary things based on time.
-type Clock interface {
- PassiveClock
- After(time.Duration) <-chan time.Time
- NewTimer(time.Duration) Timer
- Sleep(time.Duration)
- NewTicker(time.Duration) Ticker
-}
-
-// RealClock really calls time.Now()
-type RealClock struct{}
-
-// Now returns the current time.
-func (RealClock) Now() time.Time {
- return time.Now()
-}
-
-// Since returns time since the specified timestamp.
-func (RealClock) Since(ts time.Time) time.Duration {
- return time.Since(ts)
-}
-
-// After is the same as time.After(d).
-func (RealClock) After(d time.Duration) <-chan time.Time {
- return time.After(d)
-}
-
-// NewTimer returns a new Timer.
-func (RealClock) NewTimer(d time.Duration) Timer {
- return &realTimer{
- timer: time.NewTimer(d),
- }
-}
-
-// NewTicker returns a new Ticker.
-func (RealClock) NewTicker(d time.Duration) Ticker {
- return &realTicker{
- ticker: time.NewTicker(d),
- }
-}
-
-// Sleep pauses the RealClock for duration d.
-func (RealClock) Sleep(d time.Duration) {
- time.Sleep(d)
-}
-
-// FakePassiveClock implements PassiveClock, but returns an arbitrary time.
-type FakePassiveClock struct {
- lock sync.RWMutex
- time time.Time
-}
-
-// FakeClock implements Clock, but returns an arbitrary time.
-type FakeClock struct {
- FakePassiveClock
-
- // waiters are waiting for the fake time to pass their specified time
- waiters []fakeClockWaiter
-}
-
-type fakeClockWaiter struct {
- targetTime time.Time
- stepInterval time.Duration
- skipIfBlocked bool
- destChan chan time.Time
-}
-
-// NewFakePassiveClock returns a new FakePassiveClock.
-func NewFakePassiveClock(t time.Time) *FakePassiveClock {
- return &FakePassiveClock{
- time: t,
- }
-}
-
-// NewFakeClock returns a new FakeClock
-func NewFakeClock(t time.Time) *FakeClock {
- return &FakeClock{
- FakePassiveClock: *NewFakePassiveClock(t),
- }
-}
-
-// Now returns f's time.
-func (f *FakePassiveClock) Now() time.Time {
- f.lock.RLock()
- defer f.lock.RUnlock()
- return f.time
-}
-
-// Since returns time since the time in f.
-func (f *FakePassiveClock) Since(ts time.Time) time.Duration {
- f.lock.RLock()
- defer f.lock.RUnlock()
- return f.time.Sub(ts)
-}
-
-// SetTime sets the time on the FakePassiveClock.
-func (f *FakePassiveClock) SetTime(t time.Time) {
- f.lock.Lock()
- defer f.lock.Unlock()
- f.time = t
-}
-
-// After is the Fake version of time.After(d).
-func (f *FakeClock) After(d time.Duration) <-chan time.Time {
- f.lock.Lock()
- defer f.lock.Unlock()
- stopTime := f.time.Add(d)
- ch := make(chan time.Time, 1) // Don't block!
- f.waiters = append(f.waiters, fakeClockWaiter{
- targetTime: stopTime,
- destChan: ch,
- })
- return ch
-}
-
-// NewTimer is the Fake version of time.NewTimer(d).
-func (f *FakeClock) NewTimer(d time.Duration) Timer {
- f.lock.Lock()
- defer f.lock.Unlock()
- stopTime := f.time.Add(d)
- ch := make(chan time.Time, 1) // Don't block!
- timer := &fakeTimer{
- fakeClock: f,
- waiter: fakeClockWaiter{
- targetTime: stopTime,
- destChan: ch,
- },
- }
- f.waiters = append(f.waiters, timer.waiter)
- return timer
-}
-
-// NewTicker returns a new Ticker.
-func (f *FakeClock) NewTicker(d time.Duration) Ticker {
- f.lock.Lock()
- defer f.lock.Unlock()
- tickTime := f.time.Add(d)
- ch := make(chan time.Time, 1) // hold one tick
- f.waiters = append(f.waiters, fakeClockWaiter{
- targetTime: tickTime,
- stepInterval: d,
- skipIfBlocked: true,
- destChan: ch,
- })
-
- return &fakeTicker{
- c: ch,
- }
-}
-
-// Step moves the clock forward by Duration and notifies anyone that has called After, NewTimer, or NewTicker.
-func (f *FakeClock) Step(d time.Duration) {
- f.lock.Lock()
- defer f.lock.Unlock()
- f.setTimeLocked(f.time.Add(d))
-}
-
-// SetTime sets the time on a FakeClock.
-func (f *FakeClock) SetTime(t time.Time) {
- f.lock.Lock()
- defer f.lock.Unlock()
- f.setTimeLocked(t)
-}
-
-// Actually changes the time and checks any waiters. f must be write-locked.
-func (f *FakeClock) setTimeLocked(t time.Time) {
- f.time = t
- newWaiters := make([]fakeClockWaiter, 0, len(f.waiters))
- for i := range f.waiters {
- w := &f.waiters[i]
- if !w.targetTime.After(t) {
-
- if w.skipIfBlocked {
- select {
- case w.destChan <- t:
- default:
- }
- } else {
- w.destChan <- t
- }
-
- if w.stepInterval > 0 {
- for !w.targetTime.After(t) {
- w.targetTime = w.targetTime.Add(w.stepInterval)
- }
- newWaiters = append(newWaiters, *w)
- }
-
- } else {
- newWaiters = append(newWaiters, f.waiters[i])
- }
- }
- f.waiters = newWaiters
-}
-
-// HasWaiters returns true if After has been called on f but not yet satisfied (so you can
-// write race-free tests).
-func (f *FakeClock) HasWaiters() bool {
- f.lock.RLock()
- defer f.lock.RUnlock()
- return len(f.waiters) > 0
-}
-
-// Sleep pauses the FakeClock for duration d.
-func (f *FakeClock) Sleep(d time.Duration) {
- f.Step(d)
-}
-
-// IntervalClock implements Clock, but each invocation of Now steps the clock forward the specified duration
-type IntervalClock struct {
- Time time.Time
- Duration time.Duration
-}
-
-// Now returns i's time.
-func (i *IntervalClock) Now() time.Time {
- i.Time = i.Time.Add(i.Duration)
- return i.Time
-}
-
-// Since returns time since the time in i.
-func (i *IntervalClock) Since(ts time.Time) time.Duration {
- return i.Time.Sub(ts)
-}
-
-// After is currently unimplemented, will panic.
-// TODO: make interval clock use FakeClock so this can be implemented.
-func (*IntervalClock) After(d time.Duration) <-chan time.Time {
- panic("IntervalClock doesn't implement After")
-}
-
-// NewTimer is currently unimplemented, will panic.
-// TODO: make interval clock use FakeClock so this can be implemented.
-func (*IntervalClock) NewTimer(d time.Duration) Timer {
- panic("IntervalClock doesn't implement NewTimer")
-}
-
-// NewTicker is currently unimplemented, will panic.
-// TODO: make interval clock use FakeClock so this can be implemented.
-func (*IntervalClock) NewTicker(d time.Duration) Ticker {
- panic("IntervalClock doesn't implement NewTicker")
-}
-
-// Sleep is currently unimplemented; will panic.
-func (*IntervalClock) Sleep(d time.Duration) {
- panic("IntervalClock doesn't implement Sleep")
-}
-
-// Timer allows for injecting fake or real timers into code that
-// needs to do arbitrary things based on time.
-type Timer interface {
- C() <-chan time.Time
- Stop() bool
- Reset(d time.Duration) bool
-}
-
-// realTimer is backed by an actual time.Timer.
-type realTimer struct {
- timer *time.Timer
-}
-
-// C returns the underlying timer's channel.
-func (r *realTimer) C() <-chan time.Time {
- return r.timer.C
-}
-
-// Stop calls Stop() on the underlying timer.
-func (r *realTimer) Stop() bool {
- return r.timer.Stop()
-}
-
-// Reset calls Reset() on the underlying timer.
-func (r *realTimer) Reset(d time.Duration) bool {
- return r.timer.Reset(d)
-}
-
-// fakeTimer implements Timer based on a FakeClock.
-type fakeTimer struct {
- fakeClock *FakeClock
- waiter fakeClockWaiter
-}
-
-// C returns the channel that notifies when this timer has fired.
-func (f *fakeTimer) C() <-chan time.Time {
- return f.waiter.destChan
-}
-
-// Stop conditionally stops the timer. If the timer has neither fired
-// nor been stopped then this call stops the timer and returns true,
-// otherwise this call returns false. This is like time.Timer::Stop.
-func (f *fakeTimer) Stop() bool {
- f.fakeClock.lock.Lock()
- defer f.fakeClock.lock.Unlock()
- // The timer has already fired or been stopped, unless it is found
- // among the clock's waiters.
- stopped := false
- oldWaiters := f.fakeClock.waiters
- newWaiters := make([]fakeClockWaiter, 0, len(oldWaiters))
- seekChan := f.waiter.destChan
- for i := range oldWaiters {
- // Identify the timer's fakeClockWaiter by the identity of the
- // destination channel, nothing else is necessarily unique and
- // constant since the timer's creation.
- if oldWaiters[i].destChan == seekChan {
- stopped = true
- } else {
- newWaiters = append(newWaiters, oldWaiters[i])
- }
- }
-
- f.fakeClock.waiters = newWaiters
-
- return stopped
-}
-
-// Reset conditionally updates the firing time of the timer. If the
-// timer has neither fired nor been stopped then this call resets the
-// timer to the fake clock's "now" + d and returns true, otherwise
-// it creates a new waiter, adds it to the clock, and returns true.
-//
-// It is not possible to return false, because a fake timer can be reset
-// from any state (waiting to fire, already fired, and stopped).
-//
-// See the GoDoc for time.Timer::Reset for more context on why
-// the return value of Reset() is not useful.
-func (f *fakeTimer) Reset(d time.Duration) bool {
- f.fakeClock.lock.Lock()
- defer f.fakeClock.lock.Unlock()
- waiters := f.fakeClock.waiters
- seekChan := f.waiter.destChan
- for i := range waiters {
- if waiters[i].destChan == seekChan {
- waiters[i].targetTime = f.fakeClock.time.Add(d)
- return true
- }
- }
- // No existing waiter, timer has already fired or been reset.
- // We should still enable Reset() to succeed by creating a
- // new waiter and adding it to the clock's waiters.
- newWaiter := fakeClockWaiter{
- targetTime: f.fakeClock.time.Add(d),
- destChan: seekChan,
- }
- f.fakeClock.waiters = append(f.fakeClock.waiters, newWaiter)
- return true
-}
-
-// Ticker defines the Ticker interface
-type Ticker interface {
- C() <-chan time.Time
- Stop()
-}
-
-type realTicker struct {
- ticker *time.Ticker
-}
-
-func (t *realTicker) C() <-chan time.Time {
- return t.ticker.C
-}
-
-func (t *realTicker) Stop() {
- t.ticker.Stop()
-}
-
-type fakeTicker struct {
- c <-chan time.Time
-}
-
-func (t *fakeTicker) C() <-chan time.Time {
- return t.c
-}
-
-func (t *fakeTicker) Stop() {
-}
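
A minimal sketch of the test pattern the clock package above enabled: production code waits on Clock.After, and the test drives time forward deterministically with FakeClock.Step instead of sleeping. The test name is illustrative.

package example

import (
	"testing"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
)

// TestFakeClockStep registers a waiter via After, then advances the fake
// clock past the deadline and checks that the channel fired.
func TestFakeClockStep(t *testing.T) {
	fc := clock.NewFakeClock(time.Now())
	done := fc.After(5 * time.Second)

	if !fc.HasWaiters() {
		t.Fatal("expected a waiter registered by After")
	}

	fc.Step(10 * time.Second) // fires the waiter; its buffered channel receives the new time

	select {
	case <-done:
		// expected
	default:
		t.Fatal("After channel should have fired once the clock stepped past the deadline")
	}
}
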
diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
deleted file mode 100644
index 45aa74bf5..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package framer implements simple frame decoding techniques for an io.ReadCloser
-package framer
-
-import (
- "encoding/binary"
- "encoding/json"
- "io"
-)
-
-type lengthDelimitedFrameWriter struct {
- w io.Writer
- h [4]byte
-}
-
-func NewLengthDelimitedFrameWriter(w io.Writer) io.Writer {
- return &lengthDelimitedFrameWriter{w: w}
-}
-
-// Write writes a single frame to the nested writer, prepending it with the length in
-// bytes of data (as a 4-byte, big-endian uint32).
-func (w *lengthDelimitedFrameWriter) Write(data []byte) (int, error) {
- binary.BigEndian.PutUint32(w.h[:], uint32(len(data)))
- n, err := w.w.Write(w.h[:])
- if err != nil {
- return 0, err
- }
- if n != len(w.h) {
- return 0, io.ErrShortWrite
- }
- return w.w.Write(data)
-}
-
-type lengthDelimitedFrameReader struct {
- r io.ReadCloser
- remaining int
-}
-
-// NewLengthDelimitedFrameReader returns an io.Reader that will decode length-prefixed
-// frames off of a stream.
-//
-// The protocol is:
-//
-// stream: message ...
-// message: prefix body
-// prefix: 4 byte uint32 in BigEndian order, denotes length of body
-// body: bytes (0..prefix)
-//
-// If the buffer passed to Read is not long enough to contain an entire frame, io.ErrShortBuffer
-// will be returned along with the number of bytes read.
-func NewLengthDelimitedFrameReader(r io.ReadCloser) io.ReadCloser {
- return &lengthDelimitedFrameReader{r: r}
-}
-
-// Read attempts to read an entire frame into data. If that is not possible, io.ErrShortBuffer
-// is returned and subsequent calls will attempt to read the last frame. A frame is complete when
-// err is nil.
-func (r *lengthDelimitedFrameReader) Read(data []byte) (int, error) {
- if r.remaining <= 0 {
- header := [4]byte{}
- n, err := io.ReadAtLeast(r.r, header[:4], 4)
- if err != nil {
- return 0, err
- }
- if n != 4 {
- return 0, io.ErrUnexpectedEOF
- }
- frameLength := int(binary.BigEndian.Uint32(header[:]))
- r.remaining = frameLength
- }
-
- expect := r.remaining
- max := expect
- if max > len(data) {
- max = len(data)
- }
- n, err := io.ReadAtLeast(r.r, data[:max], int(max))
- r.remaining -= n
- if err == io.ErrShortBuffer || r.remaining > 0 {
- return n, io.ErrShortBuffer
- }
- if err != nil {
- return n, err
- }
- if n != expect {
- return n, io.ErrUnexpectedEOF
- }
-
- return n, nil
-}
-
-func (r *lengthDelimitedFrameReader) Close() error {
- return r.r.Close()
-}
-
-type jsonFrameReader struct {
- r io.ReadCloser
- decoder *json.Decoder
- remaining []byte
-}
-
-// NewJSONFramedReader returns an io.Reader that will decode individual JSON objects off
-// of a wire.
-//
-// The boundaries between each frame are valid JSON objects. A JSON parsing error will terminate
-// the read.
-func NewJSONFramedReader(r io.ReadCloser) io.ReadCloser {
- return &jsonFrameReader{
- r: r,
- decoder: json.NewDecoder(r),
- }
-}
-
-// Read decodes the next JSON object in the stream, or returns an error. The returned
-// byte slice will be modified the next time Read is invoked and should not be altered.
-func (r *jsonFrameReader) Read(data []byte) (int, error) {
- // Return whatever remaining data exists from an in progress frame
- if n := len(r.remaining); n > 0 {
- if n <= len(data) {
- //lint:ignore SA4006,SA4010 underlying array of data is modified here.
- data = append(data[0:0], r.remaining...)
- r.remaining = nil
- return n, nil
- }
-
- n = len(data)
- //lint:ignore SA4006,SA4010 underlying array of data is modified here.
- data = append(data[0:0], r.remaining[:n]...)
- r.remaining = r.remaining[n:]
- return n, io.ErrShortBuffer
- }
-
- // RawMessage#Unmarshal appends to data - we reset the slice down to 0 and will either see
- // data written to data, or be larger than data and a different array.
- n := len(data)
- m := json.RawMessage(data[:0])
- if err := r.decoder.Decode(&m); err != nil {
- return 0, err
- }
-
- // If capacity of data is less than length of the message, decoder will allocate a new slice
- // and set m to it, which means we need to copy the partial result back into data and preserve
- // the remaining result for subsequent reads.
- if len(m) > n {
- //lint:ignore SA4006,SA4010 underlying array of data is modified here.
- data = append(data[0:0], m[:n]...)
- r.remaining = m[n:]
- return n, io.ErrShortBuffer
- }
- return len(m), nil
-}
-
-func (r *jsonFrameReader) Close() error {
- return r.r.Close()
-}
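
To make the wire format described above concrete, a small sketch that round-trips one payload through the length-delimited framer; the helper name is illustrative and the payload is assumed to be non-empty.

package example

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"k8s.io/apimachinery/pkg/util/framer"
)

// roundTripFrame writes one payload through the frame writer and reads it
// back, illustrating the "4-byte big-endian length prefix, then body" format.
func roundTripFrame(payload []byte) error {
	var wire bytes.Buffer
	w := framer.NewLengthDelimitedFrameWriter(&wire)
	if _, err := w.Write(payload); err != nil {
		return err
	}
	// wire now holds a 4-byte length prefix followed by the payload bytes.
	fmt.Printf("frame is %d bytes for a %d-byte payload\n", wire.Len(), len(payload))

	r := framer.NewLengthDelimitedFrameReader(ioutil.NopCloser(&wire))
	buf := make([]byte, len(payload))
	n, err := r.Read(buf) // a buffer shorter than the frame would get io.ErrShortBuffer instead
	if err != nil {
		return err
	}
	if !bytes.Equal(buf[:n], payload) {
		return fmt.Errorf("round trip mismatch")
	}
	return nil
}
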
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go
deleted file mode 100644
index 5893df5bd..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package httpstream adds multiplexed streaming support to HTTP requests and
-// responses via connection upgrades.
-package httpstream // import "k8s.io/apimachinery/pkg/util/httpstream"
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
deleted file mode 100644
index 00ce5f785..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package httpstream
-
-import (
- "fmt"
- "io"
- "net/http"
- "strings"
- "time"
-)
-
-const (
- HeaderConnection = "Connection"
- HeaderUpgrade = "Upgrade"
- HeaderProtocolVersion = "X-Stream-Protocol-Version"
- HeaderAcceptedProtocolVersions = "X-Accepted-Stream-Protocol-Versions"
-)
-
-// NewStreamHandler defines a function that is called when a new Stream is
-// received. If no error is returned, the Stream is accepted; otherwise,
-// the stream is rejected. After the reply frame has been sent, replySent is closed.
-type NewStreamHandler func(stream Stream, replySent <-chan struct{}) error
-
-// NoOpNewStreamHandler is a stream handler that accepts a new stream and
-// performs no other logic.
-func NoOpNewStreamHandler(stream Stream, replySent <-chan struct{}) error { return nil }
-
-// Dialer knows how to open a streaming connection to a server.
-type Dialer interface {
-
- // Dial opens a streaming connection to a server using one of the protocols
- // specified (in order of most preferred to least preferred).
- Dial(protocols ...string) (Connection, string, error)
-}
-
-// UpgradeRoundTripper is a type of http.RoundTripper that is able to upgrade
-// HTTP requests to support multiplexed bidirectional streams. After RoundTrip()
-// is invoked, if the upgrade is successful, clients may retrieve the upgraded
-// connection by calling UpgradeRoundTripper.Connection().
-type UpgradeRoundTripper interface {
- http.RoundTripper
- // NewConnection validates the response and creates a new Connection.
- NewConnection(resp *http.Response) (Connection, error)
-}
-
-// ResponseUpgrader knows how to upgrade HTTP requests and responses to
-// add streaming support to them.
-type ResponseUpgrader interface {
- // UpgradeResponse upgrades an HTTP response to one that supports multiplexed
- // streams. newStreamHandler will be called asynchronously whenever the
- // other end of the upgraded connection creates a new stream.
- UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler NewStreamHandler) Connection
-}
-
-// Connection represents an upgraded HTTP connection.
-type Connection interface {
- // CreateStream creates a new Stream with the supplied headers.
- CreateStream(headers http.Header) (Stream, error)
- // Close resets all streams and closes the connection.
- Close() error
- // CloseChan returns a channel that is closed when the underlying connection is closed.
- CloseChan() <-chan bool
- // SetIdleTimeout sets the amount of time the connection may remain idle before
- // it is automatically closed.
- SetIdleTimeout(timeout time.Duration)
-}
-
-// Stream represents a bidirectional communications channel that is part of an
-// upgraded connection.
-type Stream interface {
- io.ReadWriteCloser
-	// Reset closes both directions of the stream, indicating that neither the client
-	// nor the server can use it any more.
- Reset() error
- // Headers returns the headers used to create the stream.
- Headers() http.Header
- // Identifier returns the stream's ID.
- Identifier() uint32
-}
-
-// IsUpgradeRequest returns true if the given request is a connection upgrade request
-func IsUpgradeRequest(req *http.Request) bool {
- for _, h := range req.Header[http.CanonicalHeaderKey(HeaderConnection)] {
- if strings.Contains(strings.ToLower(h), strings.ToLower(HeaderUpgrade)) {
- return true
- }
- }
- return false
-}
-
-func negotiateProtocol(clientProtocols, serverProtocols []string) string {
- for i := range clientProtocols {
- for j := range serverProtocols {
- if clientProtocols[i] == serverProtocols[j] {
- return clientProtocols[i]
- }
- }
- }
- return ""
-}
-
-func commaSeparatedHeaderValues(header []string) []string {
- var parsedClientProtocols []string
- for i := range header {
- for _, clientProtocol := range strings.Split(header[i], ",") {
- if proto := strings.Trim(clientProtocol, " "); len(proto) > 0 {
- parsedClientProtocols = append(parsedClientProtocols, proto)
- }
- }
- }
- return parsedClientProtocols
-}
-
-// Handshake performs a subprotocol negotiation. If the client did request a
-// subprotocol, Handshake will select the first common value found in
-// serverProtocols. If a match is found, Handshake adds a response header
-// indicating the chosen subprotocol. If no match is found, HTTP forbidden is
-// returned, along with a response header containing the list of protocols the
-// server can accept.
-func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []string) (string, error) {
- clientProtocols := commaSeparatedHeaderValues(req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)])
- if len(clientProtocols) == 0 {
- return "", fmt.Errorf("unable to upgrade: %s is required", HeaderProtocolVersion)
- }
-
- if len(serverProtocols) == 0 {
- panic(fmt.Errorf("unable to upgrade: serverProtocols is required"))
- }
-
- negotiatedProtocol := negotiateProtocol(clientProtocols, serverProtocols)
- if len(negotiatedProtocol) == 0 {
- for i := range serverProtocols {
- w.Header().Add(HeaderAcceptedProtocolVersions, serverProtocols[i])
- }
- err := fmt.Errorf("unable to upgrade: unable to negotiate protocol: client supports %v, server accepts %v", clientProtocols, serverProtocols)
- http.Error(w, err.Error(), http.StatusForbidden)
- return "", err
- }
-
- w.Header().Add(HeaderProtocolVersion, negotiatedProtocol)
- return negotiatedProtocol, nil
-}
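
A rough sketch of the server-side pattern built from the helpers above: reject non-upgrade requests, negotiate a subprotocol with Handshake, then hand the connection to a ResponseUpgrader (such as the SPDY one removed further below). The handler shape and names are illustrative.

package example

import (
	"net/http"

	"k8s.io/apimachinery/pkg/util/httpstream"
)

// handleUpgrade negotiates a subprotocol and upgrades the response to a
// multiplexed connection, serving until the peer closes it.
func handleUpgrade(upgrader httpstream.ResponseUpgrader, serverProtocols []string) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		if !httpstream.IsUpgradeRequest(req) {
			http.Error(w, "upgrade required", http.StatusBadRequest)
			return
		}
		if _, err := httpstream.Handshake(req, w, serverProtocols); err != nil {
			// On a failed negotiation, Handshake has already written a Forbidden response.
			return
		}
		conn := upgrader.UpgradeResponse(w, req, httpstream.NoOpNewStreamHandler)
		if conn == nil {
			return // UpgradeResponse already wrote an error response
		}
		defer conn.Close()
		<-conn.CloseChan() // serve until the peer goes away
	}
}
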
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go
deleted file mode 100644
index 336b4908b..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spdy
-
-import (
- "net"
- "net/http"
- "sync"
- "time"
-
- "github.com/docker/spdystream"
- "k8s.io/apimachinery/pkg/util/httpstream"
- "k8s.io/klog/v2"
-)
-
-// connection maintains state about a spdystream.Connection and its associated
-// streams.
-type connection struct {
- conn *spdystream.Connection
- streams []httpstream.Stream
- streamLock sync.Mutex
- newStreamHandler httpstream.NewStreamHandler
- ping func() (time.Duration, error)
-}
-
-// NewClientConnection creates a new SPDY client connection.
-func NewClientConnection(conn net.Conn) (httpstream.Connection, error) {
- return NewClientConnectionWithPings(conn, 0)
-}
-
-// NewClientConnectionWithPings creates a new SPDY client connection.
-//
-// If pingPeriod is non-zero, a background goroutine will send periodic Ping
-// frames to the server. Use this to keep idle connections through certain load
-// balancers alive longer.
-func NewClientConnectionWithPings(conn net.Conn, pingPeriod time.Duration) (httpstream.Connection, error) {
- spdyConn, err := spdystream.NewConnection(conn, false)
- if err != nil {
- defer conn.Close()
- return nil, err
- }
-
- return newConnection(spdyConn, httpstream.NoOpNewStreamHandler, pingPeriod, spdyConn.Ping), nil
-}
-
-// NewServerConnection creates a new SPDY server connection. newStreamHandler
-// will be invoked when the server receives a newly created stream from the
-// client.
-func NewServerConnection(conn net.Conn, newStreamHandler httpstream.NewStreamHandler) (httpstream.Connection, error) {
- return NewServerConnectionWithPings(conn, newStreamHandler, 0)
-}
-
-// NewServerConnectionWithPings creates a new SPDY server connection.
-// newStreamHandler will be invoked when the server receives a newly created
-// stream from the client.
-//
-// If pingPeriod is non-zero, a background goroutine will send periodic Ping
-// frames to the client. Use this to keep idle connections through certain load
-// balancers alive longer.
-func NewServerConnectionWithPings(conn net.Conn, newStreamHandler httpstream.NewStreamHandler, pingPeriod time.Duration) (httpstream.Connection, error) {
- spdyConn, err := spdystream.NewConnection(conn, true)
- if err != nil {
- defer conn.Close()
- return nil, err
- }
-
- return newConnection(spdyConn, newStreamHandler, pingPeriod, spdyConn.Ping), nil
-}
-
-// newConnection returns a new connection wrapping conn. newStreamHandler
-// will be invoked when the server receives a newly created stream from the
-// client.
-func newConnection(conn *spdystream.Connection, newStreamHandler httpstream.NewStreamHandler, pingPeriod time.Duration, pingFn func() (time.Duration, error)) httpstream.Connection {
- c := &connection{conn: conn, newStreamHandler: newStreamHandler, ping: pingFn}
- go conn.Serve(c.newSpdyStream)
- if pingPeriod > 0 && pingFn != nil {
- go c.sendPings(pingPeriod)
- }
- return c
-}
-
-// createStreamResponseTimeout indicates how long to wait for the other side to
-// acknowledge the new stream before timing out.
-const createStreamResponseTimeout = 30 * time.Second
-
-// Close first sends a reset for all of the connection's streams, and then
-// closes the underlying spdystream.Connection.
-func (c *connection) Close() error {
- c.streamLock.Lock()
- for _, s := range c.streams {
- // calling Reset instead of Close ensures that all streams are fully torn down
- s.Reset()
- }
- c.streams = make([]httpstream.Stream, 0)
- c.streamLock.Unlock()
-
- // now that all streams are fully torn down, it's safe to call close on the underlying connection,
- // which should be able to terminate immediately at this point, instead of waiting for any
- // remaining graceful stream termination.
- return c.conn.Close()
-}
-
-// CreateStream creates a new stream with the specified headers and registers
-// it with the connection.
-func (c *connection) CreateStream(headers http.Header) (httpstream.Stream, error) {
- stream, err := c.conn.CreateStream(headers, nil, false)
- if err != nil {
- return nil, err
- }
- if err = stream.WaitTimeout(createStreamResponseTimeout); err != nil {
- return nil, err
- }
-
- c.registerStream(stream)
- return stream, nil
-}
-
-// registerStream adds the stream s to the connection's list of streams that
-// it owns.
-func (c *connection) registerStream(s httpstream.Stream) {
- c.streamLock.Lock()
- c.streams = append(c.streams, s)
- c.streamLock.Unlock()
-}
-
-// CloseChan returns a channel that, when closed, indicates that the underlying
-// spdystream.Connection has been closed.
-func (c *connection) CloseChan() <-chan bool {
- return c.conn.CloseChan()
-}
-
-// newSpdyStream is the internal new stream handler used by spdystream.Connection.Serve.
-// It calls connection's newStreamHandler, giving it the opportunity to accept or reject
-// the stream. If newStreamHandler returns an error, the stream is rejected. If not, the
-// stream is accepted and registered with the connection.
-func (c *connection) newSpdyStream(stream *spdystream.Stream) {
- replySent := make(chan struct{})
- err := c.newStreamHandler(stream, replySent)
- rejectStream := (err != nil)
- if rejectStream {
- klog.Warningf("Stream rejected: %v", err)
- stream.Reset()
- return
- }
-
- c.registerStream(stream)
- stream.SendReply(http.Header{}, rejectStream)
- close(replySent)
-}
-
-// SetIdleTimeout sets the amount of time the connection may remain idle before
-// it is automatically closed.
-func (c *connection) SetIdleTimeout(timeout time.Duration) {
- c.conn.SetIdleTimeout(timeout)
-}
-
-func (c *connection) sendPings(period time.Duration) {
- t := time.NewTicker(period)
- defer t.Stop()
- for {
- select {
- case <-c.conn.CloseChan():
- return
- case <-t.C:
- }
- if _, err := c.ping(); err != nil {
- klog.V(3).Infof("SPDY Ping failed: %v", err)
- // Continue, in case this is a transient failure.
- // c.conn.CloseChan above will tell us when the connection is
- // actually closed.
- }
- }
-}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
deleted file mode 100644
index 4cb1cfadc..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
+++ /dev/null
@@ -1,369 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spdy
-
-import (
- "bufio"
- "bytes"
- "context"
- "crypto/tls"
- "encoding/base64"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/http/httputil"
- "net/url"
- "strings"
- "time"
-
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/serializer"
- "k8s.io/apimachinery/pkg/util/httpstream"
- utilnet "k8s.io/apimachinery/pkg/util/net"
- "k8s.io/apimachinery/third_party/forked/golang/netutil"
-)
-
-// SpdyRoundTripper knows how to upgrade an HTTP request to one that supports
-// multiplexed streams. After RoundTrip() is invoked, Conn will be set
-// and usable. SpdyRoundTripper implements the UpgradeRoundTripper interface.
-type SpdyRoundTripper struct {
-	// tlsConfig holds the TLS configuration settings to use when connecting
-	// to the remote server.
- tlsConfig *tls.Config
-
- /* TODO according to http://golang.org/pkg/net/http/#RoundTripper, a RoundTripper
- must be safe for use by multiple concurrent goroutines. If this is absolutely
- necessary, we could keep a map from http.Request to net.Conn. In practice,
-	   a client will create an http.Client, set the transport to a new instance of
- SpdyRoundTripper, and use it a single time, so this hopefully won't be an issue.
- */
- // conn is the underlying network connection to the remote server.
- conn net.Conn
-
- // Dialer is the dialer used to connect. Used if non-nil.
- Dialer *net.Dialer
-
- // proxier knows which proxy to use given a request, defaults to http.ProxyFromEnvironment
- // Used primarily for mocking the proxy discovery in tests.
- proxier func(req *http.Request) (*url.URL, error)
-
- // followRedirects indicates if the round tripper should examine responses for redirects and
- // follow them.
- followRedirects bool
- // requireSameHostRedirects restricts redirect following to only follow redirects to the same host
- // as the original request.
- requireSameHostRedirects bool
- // pingPeriod is a period for sending Ping frames over established
- // connections.
- pingPeriod time.Duration
-}
-
-var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{}
-var _ httpstream.UpgradeRoundTripper = &SpdyRoundTripper{}
-var _ utilnet.Dialer = &SpdyRoundTripper{}
-
-// NewRoundTripper creates a new SpdyRoundTripper that will use the specified
-// tlsConfig.
-func NewRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) *SpdyRoundTripper {
- return NewRoundTripperWithConfig(RoundTripperConfig{
- TLS: tlsConfig,
- FollowRedirects: followRedirects,
- RequireSameHostRedirects: requireSameHostRedirects,
- })
-}
-
-// NewRoundTripperWithProxy creates a new SpdyRoundTripper that will use the
-// specified tlsConfig and proxy func.
-func NewRoundTripperWithProxy(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool, proxier func(*http.Request) (*url.URL, error)) *SpdyRoundTripper {
- return NewRoundTripperWithConfig(RoundTripperConfig{
- TLS: tlsConfig,
- FollowRedirects: followRedirects,
- RequireSameHostRedirects: requireSameHostRedirects,
- Proxier: proxier,
- })
-}
-
-// NewRoundTripperWithConfig creates a new SpdyRoundTripper with the specified
-// configuration.
-func NewRoundTripperWithConfig(cfg RoundTripperConfig) *SpdyRoundTripper {
- if cfg.Proxier == nil {
- cfg.Proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
- }
- return &SpdyRoundTripper{
- tlsConfig: cfg.TLS,
- followRedirects: cfg.FollowRedirects,
- requireSameHostRedirects: cfg.RequireSameHostRedirects,
- proxier: cfg.Proxier,
- pingPeriod: cfg.PingPeriod,
- }
-}
-
-// RoundTripperConfig is a set of options for an SpdyRoundTripper.
-type RoundTripperConfig struct {
- // TLS configuration used by the round tripper.
- TLS *tls.Config
- // Proxier is a proxy function invoked on each request. Optional.
- Proxier func(*http.Request) (*url.URL, error)
- // PingPeriod is a period for sending SPDY Pings on the connection.
- // Optional.
- PingPeriod time.Duration
-
- FollowRedirects bool
- RequireSameHostRedirects bool
-}
-
-// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during
-// proxying with a spdy roundtripper.
-func (s *SpdyRoundTripper) TLSClientConfig() *tls.Config {
- return s.tlsConfig
-}
-
-// Dial implements k8s.io/apimachinery/pkg/util/net.Dialer.
-func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) {
- conn, err := s.dial(req)
- if err != nil {
- return nil, err
- }
-
- if err := req.Write(conn); err != nil {
- conn.Close()
- return nil, err
- }
-
- return conn, nil
-}
-
-// dial dials the host specified by req, using TLS if appropriate, optionally
-// using a proxy server if one is configured via environment variables.
-func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
- proxyURL, err := s.proxier(req)
- if err != nil {
- return nil, err
- }
-
- if proxyURL == nil {
- return s.dialWithoutProxy(req.Context(), req.URL)
- }
-
- // ensure we use a canonical host with proxyReq
- targetHost := netutil.CanonicalAddr(req.URL)
-
- // proxying logic adapted from http://blog.h6t.eu/post/74098062923/golang-websocket-with-http-proxy-support
- proxyReq := http.Request{
- Method: "CONNECT",
- URL: &url.URL{},
- Host: targetHost,
- }
-
- if pa := s.proxyAuth(proxyURL); pa != "" {
- proxyReq.Header = http.Header{}
- proxyReq.Header.Set("Proxy-Authorization", pa)
- }
-
- proxyDialConn, err := s.dialWithoutProxy(req.Context(), proxyURL)
- if err != nil {
- return nil, err
- }
-
- proxyClientConn := httputil.NewProxyClientConn(proxyDialConn, nil)
- _, err = proxyClientConn.Do(&proxyReq)
- if err != nil && err != httputil.ErrPersistEOF {
- return nil, err
- }
-
- rwc, _ := proxyClientConn.Hijack()
-
- if req.URL.Scheme != "https" {
- return rwc, nil
- }
-
- host, _, err := net.SplitHostPort(targetHost)
- if err != nil {
- return nil, err
- }
-
- tlsConfig := s.tlsConfig
- switch {
- case tlsConfig == nil:
- tlsConfig = &tls.Config{ServerName: host}
- case len(tlsConfig.ServerName) == 0:
- tlsConfig = tlsConfig.Clone()
- tlsConfig.ServerName = host
- }
-
- tlsConn := tls.Client(rwc, tlsConfig)
-
- // need to manually call Handshake() so we can call VerifyHostname() below
- if err := tlsConn.Handshake(); err != nil {
- return nil, err
- }
-
- // Return if we were configured to skip validation
- if tlsConfig.InsecureSkipVerify {
- return tlsConn, nil
- }
-
- if err := tlsConn.VerifyHostname(tlsConfig.ServerName); err != nil {
- return nil, err
- }
-
- return tlsConn, nil
-}
-
-// dialWithoutProxy dials the host specified by url, using TLS if appropriate.
-func (s *SpdyRoundTripper) dialWithoutProxy(ctx context.Context, url *url.URL) (net.Conn, error) {
- dialAddr := netutil.CanonicalAddr(url)
-
- if url.Scheme == "http" {
- if s.Dialer == nil {
- var d net.Dialer
- return d.DialContext(ctx, "tcp", dialAddr)
- } else {
- return s.Dialer.DialContext(ctx, "tcp", dialAddr)
- }
- }
-
- // TODO validate the TLSClientConfig is set up?
- var conn *tls.Conn
- var err error
- if s.Dialer == nil {
- conn, err = tls.Dial("tcp", dialAddr, s.tlsConfig)
- } else {
- conn, err = tls.DialWithDialer(s.Dialer, "tcp", dialAddr, s.tlsConfig)
- }
- if err != nil {
- return nil, err
- }
-
- // Return if we were configured to skip validation
- if s.tlsConfig != nil && s.tlsConfig.InsecureSkipVerify {
- return conn, nil
- }
-
- host, _, err := net.SplitHostPort(dialAddr)
- if err != nil {
- return nil, err
- }
- if s.tlsConfig != nil && len(s.tlsConfig.ServerName) > 0 {
- host = s.tlsConfig.ServerName
- }
- err = conn.VerifyHostname(host)
- if err != nil {
- return nil, err
- }
-
- return conn, nil
-}
-
-// proxyAuth returns, for a given proxy URL, the value to be used for the Proxy-Authorization header
-func (s *SpdyRoundTripper) proxyAuth(proxyURL *url.URL) string {
- if proxyURL == nil || proxyURL.User == nil {
- return ""
- }
- credentials := proxyURL.User.String()
- encodedAuth := base64.StdEncoding.EncodeToString([]byte(credentials))
- return fmt.Sprintf("Basic %s", encodedAuth)
-}
-
-// RoundTrip executes the Request and upgrades it. After a successful upgrade,
-// clients may call SpdyRoundTripper.Connection() to retrieve the upgraded
-// connection.
-func (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
- header := utilnet.CloneHeader(req.Header)
- header.Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
- header.Add(httpstream.HeaderUpgrade, HeaderSpdy31)
-
- var (
- conn net.Conn
- rawResponse []byte
- err error
- )
-
- if s.followRedirects {
- conn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, req.URL, header, req.Body, s, s.requireSameHostRedirects)
- } else {
- clone := utilnet.CloneRequest(req)
- clone.Header = header
- conn, err = s.Dial(clone)
- }
- if err != nil {
- return nil, err
- }
-
- responseReader := bufio.NewReader(
- io.MultiReader(
- bytes.NewBuffer(rawResponse),
- conn,
- ),
- )
-
- resp, err := http.ReadResponse(responseReader, nil)
- if err != nil {
- if conn != nil {
- conn.Close()
- }
- return nil, err
- }
-
- s.conn = conn
-
- return resp, nil
-}
-
-// NewConnection validates the upgrade response, creating and returning a new
-// httpstream.Connection if there were no errors.
-func (s *SpdyRoundTripper) NewConnection(resp *http.Response) (httpstream.Connection, error) {
- connectionHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderConnection))
- upgradeHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderUpgrade))
- if (resp.StatusCode != http.StatusSwitchingProtocols) || !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
- defer resp.Body.Close()
- responseError := ""
- responseErrorBytes, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- responseError = "unable to read error from server response"
- } else {
- // TODO: I don't belong here, I should be abstracted from this class
- if obj, _, err := statusCodecs.UniversalDecoder().Decode(responseErrorBytes, nil, &metav1.Status{}); err == nil {
- if status, ok := obj.(*metav1.Status); ok {
- return nil, &apierrors.StatusError{ErrStatus: *status}
- }
- }
- responseError = string(responseErrorBytes)
- responseError = strings.TrimSpace(responseError)
- }
-
- return nil, fmt.Errorf("unable to upgrade connection: %s", responseError)
- }
-
- return NewClientConnectionWithPings(s.conn, s.pingPeriod)
-}
-
-// statusScheme is private scheme for the decoding here until someone fixes the TODO in NewConnection
-var statusScheme = runtime.NewScheme()
-
-// statusCodecs knows how to decode the metav1.Status carried in failed upgrade responses.
-var statusCodecs = serializer.NewCodecFactory(statusScheme)
-
-func init() {
- statusScheme.AddUnversionedTypes(metav1.SchemeGroupVersion,
- &metav1.Status{},
- )
-}
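
A minimal sketch of the client-side flow through the round tripper above: perform the upgrade request, turn the response into an httpstream.Connection, and open one stream. The request is assumed to already carry the target URL and any protocol-version headers the server expects; the helper name is illustrative.

package example

import (
	"net/http"

	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/apimachinery/pkg/util/httpstream/spdy"
)

// dialSpdy upgrades req over SPDY/3.1 and opens a single stream on the
// resulting connection.
func dialSpdy(req *http.Request) (httpstream.Connection, httpstream.Stream, error) {
	rt := spdy.NewRoundTripper(nil, false, false) // no TLS config override, no redirect following
	resp, err := rt.RoundTrip(req)
	if err != nil {
		return nil, nil, err
	}
	defer resp.Body.Close()

	conn, err := rt.NewConnection(resp)
	if err != nil {
		return nil, nil, err
	}
	stream, err := conn.CreateStream(http.Header{})
	if err != nil {
		conn.Close()
		return nil, nil, err
	}
	return conn, stream, nil
}
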
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
deleted file mode 100644
index f17eb09e9..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spdy
-
-import (
- "bufio"
- "fmt"
- "io"
- "net"
- "net/http"
- "strings"
- "sync/atomic"
- "time"
-
- "k8s.io/apimachinery/pkg/util/httpstream"
- "k8s.io/apimachinery/pkg/util/runtime"
-)
-
-const HeaderSpdy31 = "SPDY/3.1"
-
-// responseUpgrader knows how to upgrade HTTP responses. It
-// implements the httpstream.ResponseUpgrader interface.
-type responseUpgrader struct {
- pingPeriod time.Duration
-}
-
-// connWrapper is used to wrap a hijacked connection and its bufio.Reader. All
-// calls will be handled directly by the underlying net.Conn with the exception
-// of Read and Close calls, which will consider data in the bufio.Reader. This
-// ensures that data already inside the used bufio.Reader instance is also
-// read.
-type connWrapper struct {
- net.Conn
- closed int32
- bufReader *bufio.Reader
-}
-
-func (w *connWrapper) Read(b []byte) (n int, err error) {
- if atomic.LoadInt32(&w.closed) == 1 {
- return 0, io.EOF
- }
- return w.bufReader.Read(b)
-}
-
-func (w *connWrapper) Close() error {
- err := w.Conn.Close()
- atomic.StoreInt32(&w.closed, 1)
- return err
-}
-
-// NewResponseUpgrader returns a new httpstream.ResponseUpgrader that is
-// capable of upgrading HTTP responses using SPDY/3.1 via the
-// spdystream package.
-func NewResponseUpgrader() httpstream.ResponseUpgrader {
- return NewResponseUpgraderWithPings(0)
-}
-
-// NewResponseUpgraderWithPings returns a new httpstream.ResponseUpgrader that
-// is capable of upgrading HTTP responses using SPDY/3.1 via the spdystream
-// package.
-//
-// If pingPeriod is non-zero, for each incoming connection a background
-// goroutine will send periodic Ping frames to the client. Use this to keep
-// idle connections through certain load balancers alive longer.
-func NewResponseUpgraderWithPings(pingPeriod time.Duration) httpstream.ResponseUpgrader {
- return responseUpgrader{pingPeriod: pingPeriod}
-}
-
-// UpgradeResponse upgrades an HTTP response to one that supports multiplexed
-// streams. newStreamHandler will be called asynchronously whenever the
-// other end of the upgraded connection creates a new stream.
-func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection {
- connectionHeader := strings.ToLower(req.Header.Get(httpstream.HeaderConnection))
- upgradeHeader := strings.ToLower(req.Header.Get(httpstream.HeaderUpgrade))
- if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
- errorMsg := fmt.Sprintf("unable to upgrade: missing upgrade headers in request: %#v", req.Header)
- http.Error(w, errorMsg, http.StatusBadRequest)
- return nil
- }
-
- hijacker, ok := w.(http.Hijacker)
- if !ok {
- errorMsg := fmt.Sprintf("unable to upgrade: unable to hijack response")
- http.Error(w, errorMsg, http.StatusInternalServerError)
- return nil
- }
-
- w.Header().Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
- w.Header().Add(httpstream.HeaderUpgrade, HeaderSpdy31)
- w.WriteHeader(http.StatusSwitchingProtocols)
-
- conn, bufrw, err := hijacker.Hijack()
- if err != nil {
- runtime.HandleError(fmt.Errorf("unable to upgrade: error hijacking response: %v", err))
- return nil
- }
-
- connWithBuf := &connWrapper{Conn: conn, bufReader: bufrw.Reader}
- spdyConn, err := NewServerConnectionWithPings(connWithBuf, newStreamHandler, u.pingPeriod)
- if err != nil {
- runtime.HandleError(fmt.Errorf("unable to upgrade: error creating SPDY server connection: %v", err))
- return nil
- }
-
- return spdyConn
-}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go
deleted file mode 100644
index acfeb827c..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package remotecommand
-
-import (
- "time"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-const (
- DefaultStreamCreationTimeout = 30 * time.Second
-
- // The SPDY subprotocol "channel.k8s.io" is used for remote command
- // attachment/execution. This represents the initial unversioned subprotocol,
- // which has the known bugs http://issues.k8s.io/13394 and
- // http://issues.k8s.io/13395.
- StreamProtocolV1Name = "channel.k8s.io"
-
- // The SPDY subprotocol "v2.channel.k8s.io" is used for remote command
- // attachment/execution. It is the second version of the subprotocol and
- // resolves the issues present in the first version.
- StreamProtocolV2Name = "v2.channel.k8s.io"
-
- // The SPDY subprotocol "v3.channel.k8s.io" is used for remote command
- // attachment/execution. It is the third version of the subprotocol and
- // adds support for resizing container terminals.
- StreamProtocolV3Name = "v3.channel.k8s.io"
-
- // The SPDY subprotocol "v4.channel.k8s.io" is used for remote command
- // attachment/execution. It is the 4th version of the subprotocol and
- // adds support for exit codes.
- StreamProtocolV4Name = "v4.channel.k8s.io"
-
- NonZeroExitCodeReason = metav1.StatusReason("NonZeroExitCode")
- ExitCodeCauseType = metav1.CauseType("ExitCode")
-)
-
-var SupportedStreamingProtocols = []string{StreamProtocolV4Name, StreamProtocolV3Name, StreamProtocolV2Name, StreamProtocolV1Name}
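
A small sketch of how these constants were typically advertised by a client before an exec/attach upgrade: one X-Stream-Protocol-Version header per supported subprotocol, newest first, so the server-side Handshake can pick the best common version. The helper name is illustrative.

package example

import (
	"net/http"

	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/apimachinery/pkg/util/remotecommand"
)

// addProtocolHeaders advertises every supported remote-command subprotocol
// on the outgoing upgrade request.
func addProtocolHeaders(req *http.Request) {
	for _, proto := range remotecommand.SupportedStreamingProtocols {
		req.Header.Add(httpstream.HeaderProtocolVersion, proto)
	}
}
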
diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
deleted file mode 100644
index 7fe706467..000000000
--- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package yaml
-
-import (
- "bufio"
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "strings"
- "unicode"
-
- jsonutil "k8s.io/apimachinery/pkg/util/json"
-
- "k8s.io/klog/v2"
- "sigs.k8s.io/yaml"
-)
-
-// Unmarshal unmarshals the given data.
-// If v is a *map[string]interface{}, *[]interface{}, or *interface{}, numbers
-// are converted to int64 or float64.
-func Unmarshal(data []byte, v interface{}) error {
- preserveIntFloat := func(d *json.Decoder) *json.Decoder {
- d.UseNumber()
- return d
- }
- switch v := v.(type) {
- case *map[string]interface{}:
- if err := yaml.Unmarshal(data, v, preserveIntFloat); err != nil {
- return err
- }
- return jsonutil.ConvertMapNumbers(*v, 0)
- case *[]interface{}:
- if err := yaml.Unmarshal(data, v, preserveIntFloat); err != nil {
- return err
- }
- return jsonutil.ConvertSliceNumbers(*v, 0)
- case *interface{}:
- if err := yaml.Unmarshal(data, v, preserveIntFloat); err != nil {
- return err
- }
- return jsonutil.ConvertInterfaceNumbers(v, 0)
- default:
- return yaml.Unmarshal(data, v)
- }
-}
-
-// ToJSON converts a single YAML document into a JSON document
-// or returns an error. If the document appears to be JSON the
-// YAML decoding path is not used (so that error messages are
-// JSON specific).
-func ToJSON(data []byte) ([]byte, error) {
- if hasJSONPrefix(data) {
- return data, nil
- }
- return yaml.YAMLToJSON(data)
-}
-
-// YAMLToJSONDecoder decodes YAML documents from an io.Reader by
-// separating individual documents. It first converts the YAML
-// body to JSON, then unmarshals the JSON.
-type YAMLToJSONDecoder struct {
- reader Reader
-}
-
-// NewYAMLToJSONDecoder decodes YAML documents from the provided
-// stream in chunks by converting each document (as defined by
-// the YAML spec) into its own chunk, converting it to JSON via
-// yaml.YAMLToJSON, and then passing it to json.Decoder.
-func NewYAMLToJSONDecoder(r io.Reader) *YAMLToJSONDecoder {
- reader := bufio.NewReader(r)
- return &YAMLToJSONDecoder{
- reader: NewYAMLReader(reader),
- }
-}
-
-// Decode reads a YAML document as JSON from the stream or returns
-// an error. The decoding rules match json.Unmarshal, not
-// yaml.Unmarshal.
-func (d *YAMLToJSONDecoder) Decode(into interface{}) error {
- bytes, err := d.reader.Read()
- if err != nil && err != io.EOF {
- return err
- }
-
- if len(bytes) != 0 {
- err := yaml.Unmarshal(bytes, into)
- if err != nil {
- return YAMLSyntaxError{err}
- }
- }
- return err
-}
-
-// YAMLDecoder reads chunks of objects and returns ErrShortBuffer if
-// the data is not sufficient.
-type YAMLDecoder struct {
- r io.ReadCloser
- scanner *bufio.Scanner
- remaining []byte
-}
-
-// NewDocumentDecoder decodes YAML documents from the provided
-// stream in chunks by converting each document (as defined by
-// the YAML spec) into its own chunk. io.ErrShortBuffer will be
-// returned if the entire buffer could not be read to assist
-// the caller in framing the chunk.
-func NewDocumentDecoder(r io.ReadCloser) io.ReadCloser {
- scanner := bufio.NewScanner(r)
- // the size of initial allocation for buffer 4k
- buf := make([]byte, 4*1024)
- // the maximum size used to buffer a token 5M
- scanner.Buffer(buf, 5*1024*1024)
- scanner.Split(splitYAMLDocument)
- return &YAMLDecoder{
- r: r,
- scanner: scanner,
- }
-}
-
-// Read reads the previous slice into the buffer, or attempts to read
-// the next chunk.
-// TODO: switch to readline approach.
-func (d *YAMLDecoder) Read(data []byte) (n int, err error) {
- left := len(d.remaining)
- if left == 0 {
- // return the next chunk from the stream
- if !d.scanner.Scan() {
- err := d.scanner.Err()
- if err == nil {
- err = io.EOF
- }
- return 0, err
- }
- out := d.scanner.Bytes()
- d.remaining = out
- left = len(out)
- }
-
- // fits within data
- if left <= len(data) {
- copy(data, d.remaining)
- d.remaining = nil
- return left, nil
- }
-
- // caller will need to reread
- copy(data, d.remaining[:len(data)])
- d.remaining = d.remaining[len(data):]
- return len(data), io.ErrShortBuffer
-}
-
-func (d *YAMLDecoder) Close() error {
- return d.r.Close()
-}
-
-const yamlSeparator = "\n---"
-const separator = "---"
-
-// splitYAMLDocument is a bufio.SplitFunc for splitting YAML streams into individual documents.
-func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err error) {
- if atEOF && len(data) == 0 {
- return 0, nil, nil
- }
- sep := len([]byte(yamlSeparator))
- if i := bytes.Index(data, []byte(yamlSeparator)); i >= 0 {
- // We have a potential document terminator
- i += sep
- after := data[i:]
- if len(after) == 0 {
- // we can't read any more characters
- if atEOF {
- return len(data), data[:len(data)-sep], nil
- }
- return 0, nil, nil
- }
- if j := bytes.IndexByte(after, '\n'); j >= 0 {
- return i + j + 1, data[0 : i-sep], nil
- }
- return 0, nil, nil
- }
- // If we're at EOF, we have a final, non-terminated line. Return it.
- if atEOF {
- return len(data), data, nil
- }
- // Request more data.
- return 0, nil, nil
-}
-
-// decoder is a convenience interface for Decode.
-type decoder interface {
- Decode(into interface{}) error
-}
-
-// YAMLOrJSONDecoder attempts to decode a stream of JSON documents or
-// YAML documents by sniffing for a leading { character.
-type YAMLOrJSONDecoder struct {
- r io.Reader
- bufferSize int
-
- decoder decoder
- rawData []byte
-}
-
-type JSONSyntaxError struct {
- Line int
- Err error
-}
-
-func (e JSONSyntaxError) Error() string {
- return fmt.Sprintf("json: line %d: %s", e.Line, e.Err.Error())
-}
-
-type YAMLSyntaxError struct {
- err error
-}
-
-func (e YAMLSyntaxError) Error() string {
- return e.err.Error()
-}
-
-// NewYAMLOrJSONDecoder returns a decoder that will process YAML documents
-// or JSON documents from the given reader as a stream. bufferSize determines
-// how far into the stream the decoder will look to figure out whether this
-// is a JSON stream (has whitespace followed by an open brace).
-func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder {
- return &YAMLOrJSONDecoder{
- r: r,
- bufferSize: bufferSize,
- }
-}
-
-// Decode unmarshals the next object from the underlying stream into the
-// provide object, or returns an error.
-func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
- if d.decoder == nil {
- buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize)
- if isJSON {
- d.decoder = json.NewDecoder(buffer)
- d.rawData = origData
- } else {
- d.decoder = NewYAMLToJSONDecoder(buffer)
- }
- }
- err := d.decoder.Decode(into)
- if jsonDecoder, ok := d.decoder.(*json.Decoder); ok {
- if syntax, ok := err.(*json.SyntaxError); ok {
- data, readErr := ioutil.ReadAll(jsonDecoder.Buffered())
- if readErr != nil {
- klog.V(4).Infof("reading stream failed: %v", readErr)
- }
- js := string(data)
-
- // if contents from io.Reader are not complete,
- // use the original raw data to prevent panic
- if int64(len(js)) <= syntax.Offset {
- js = string(d.rawData)
- }
-
- start := strings.LastIndex(js[:syntax.Offset], "\n") + 1
- line := strings.Count(js[:start], "\n")
- return JSONSyntaxError{
- Line: line,
- Err: fmt.Errorf(syntax.Error()),
- }
- }
- }
- return err
-}
-
-type Reader interface {
- Read() ([]byte, error)
-}
-
-type YAMLReader struct {
- reader Reader
-}
-
-func NewYAMLReader(r *bufio.Reader) *YAMLReader {
- return &YAMLReader{
- reader: &LineReader{reader: r},
- }
-}
-
-// Read returns a full YAML document.
-func (r *YAMLReader) Read() ([]byte, error) {
- var buffer bytes.Buffer
- for {
- line, err := r.reader.Read()
- if err != nil && err != io.EOF {
- return nil, err
- }
-
- sep := len([]byte(separator))
- if i := bytes.Index(line, []byte(separator)); i == 0 {
- // We have a potential document terminator
- i += sep
- after := line[i:]
- if len(strings.TrimRightFunc(string(after), unicode.IsSpace)) == 0 {
- if buffer.Len() != 0 {
- return buffer.Bytes(), nil
- }
- if err == io.EOF {
- return nil, err
- }
- }
- }
- if err == io.EOF {
- if buffer.Len() != 0 {
- // If we're at EOF, we have a final, non-terminated line. Return it.
- return buffer.Bytes(), nil
- }
- return nil, err
- }
- buffer.Write(line)
- }
-}
-
-type LineReader struct {
- reader *bufio.Reader
-}
-
-// Read returns a single line (terminated by '\n') from the underlying reader.
-// An error is returned iff there is an error with the underlying reader.
-func (r *LineReader) Read() ([]byte, error) {
- var (
- isPrefix bool = true
- err error = nil
- line []byte
- buffer bytes.Buffer
- )
-
- for isPrefix && err == nil {
- line, isPrefix, err = r.reader.ReadLine()
- buffer.Write(line)
- }
- buffer.WriteByte('\n')
- return buffer.Bytes(), err
-}
-
-// GuessJSONStream scans the provided reader up to size, looking
-// for an open brace indicating this is JSON. It will return the
-// bufio.Reader it creates for the consumer.
-func GuessJSONStream(r io.Reader, size int) (io.Reader, []byte, bool) {
- buffer := bufio.NewReaderSize(r, size)
- b, _ := buffer.Peek(size)
- return buffer, b, hasJSONPrefix(b)
-}
-
-var jsonPrefix = []byte("{")
-
-// hasJSONPrefix returns true if the provided buffer appears to start with
-// a JSON open brace.
-func hasJSONPrefix(buf []byte) bool {
- return hasPrefix(buf, jsonPrefix)
-}
-
-// hasPrefix returns true if the first non-whitespace bytes in buf
-// start with prefix.
-func hasPrefix(buf []byte, prefix []byte) bool {
- trim := bytes.TrimLeftFunc(buf, unicode.IsSpace)
- return bytes.HasPrefix(trim, prefix)
-}
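
For context on the decoder removed above, a minimal usage sketch of NewYAMLOrJSONDecoder as defined in the deleted file; the document literal and buffer size are illustrative assumptions:

    package main

    import (
    	"fmt"
    	"strings"

    	"k8s.io/apimachinery/pkg/util/yaml"
    )

    func main() {
    	doc := "name: demo\nreplicas: 3\n"
    	// The decoder sniffs the stream: a leading '{' selects the JSON path,
    	// anything else goes through the YAML-to-JSON path.
    	dec := yaml.NewYAMLOrJSONDecoder(strings.NewReader(doc), 4096)
    	obj := map[string]interface{}{}
    	if err := dec.Decode(&obj); err != nil {
    		fmt.Println("decode failed:", err)
    		return
    	}
    	fmt.Println(obj["name"], obj["replicas"])
    }
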
diff --git a/vendor/k8s.io/apimachinery/pkg/version/doc.go b/vendor/k8s.io/apimachinery/pkg/version/doc.go
deleted file mode 100644
index 29574fd6d..000000000
--- a/vendor/k8s.io/apimachinery/pkg/version/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// +k8s:openapi-gen=true
-
-// Package version supplies the type for version information collected at build time.
-package version // import "k8s.io/apimachinery/pkg/version"
diff --git a/vendor/k8s.io/apimachinery/pkg/version/helpers.go b/vendor/k8s.io/apimachinery/pkg/version/helpers.go
deleted file mode 100644
index 5e041d6f3..000000000
--- a/vendor/k8s.io/apimachinery/pkg/version/helpers.go
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package version
-
-import (
- "regexp"
- "strconv"
- "strings"
-)
-
-type versionType int
-
-const (
-	// The bigger the version type number, the higher its priority.
- versionTypeAlpha versionType = iota
- versionTypeBeta
- versionTypeGA
-)
-
-var kubeVersionRegex = regexp.MustCompile("^v([\\d]+)(?:(alpha|beta)([\\d]+))?$")
-
-func parseKubeVersion(v string) (majorVersion int, vType versionType, minorVersion int, ok bool) {
- var err error
- submatches := kubeVersionRegex.FindStringSubmatch(v)
- if len(submatches) != 4 {
- return 0, 0, 0, false
- }
- switch submatches[2] {
- case "alpha":
- vType = versionTypeAlpha
- case "beta":
- vType = versionTypeBeta
- case "":
- vType = versionTypeGA
- default:
- return 0, 0, 0, false
- }
- if majorVersion, err = strconv.Atoi(submatches[1]); err != nil {
- return 0, 0, 0, false
- }
- if vType != versionTypeGA {
- if minorVersion, err = strconv.Atoi(submatches[3]); err != nil {
- return 0, 0, 0, false
- }
- }
- return majorVersion, vType, minorVersion, true
-}
-
-// CompareKubeAwareVersionStrings compares two kube-like version strings.
-// Kube-like version strings start with a "v", followed by a major version, an optional "alpha" or "beta" string,
-// and a minor version (e.g. v1, v2beta1). Versions are sorted by stability first (GA before beta before alpha) and
-// then by major and minor version, e.g. v2, v1, v1beta2, v1beta1, v1alpha1.
-func CompareKubeAwareVersionStrings(v1, v2 string) int {
- if v1 == v2 {
- return 0
- }
- v1major, v1type, v1minor, ok1 := parseKubeVersion(v1)
- v2major, v2type, v2minor, ok2 := parseKubeVersion(v2)
- switch {
- case !ok1 && !ok2:
- return strings.Compare(v2, v1)
- case !ok1 && ok2:
- return -1
- case ok1 && !ok2:
- return 1
- }
- if v1type != v2type {
- return int(v1type) - int(v2type)
- }
- if v1major != v2major {
- return v1major - v2major
- }
- return v1minor - v2minor
-}
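
For context on the comparator removed above, a small sketch of sorting kube-style versions from highest to lowest priority with CompareKubeAwareVersionStrings; the sample slice is an illustrative assumption:

    package main

    import (
    	"fmt"
    	"sort"

    	"k8s.io/apimachinery/pkg/version"
    )

    func main() {
    	vs := []string{"v1alpha1", "v2", "v1beta1", "v1", "v1beta2"}
    	// A positive comparator result means the first argument ranks higher
    	// (GA before beta before alpha, then by major/minor version).
    	sort.Slice(vs, func(i, j int) bool {
    		return version.CompareKubeAwareVersionStrings(vs[i], vs[j]) > 0
    	})
    	fmt.Println(vs) // [v2 v1 v1beta2 v1beta1 v1alpha1]
    }
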
diff --git a/vendor/k8s.io/apimachinery/pkg/version/types.go b/vendor/k8s.io/apimachinery/pkg/version/types.go
deleted file mode 100644
index 72727b503..000000000
--- a/vendor/k8s.io/apimachinery/pkg/version/types.go
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package version
-
-// Info contains versioning information.
-// TODO: Add []string of api versions supported? It's still unclear
-// how we'll want to distribute that information.
-type Info struct {
- Major string `json:"major"`
- Minor string `json:"minor"`
- GitVersion string `json:"gitVersion"`
- GitCommit string `json:"gitCommit"`
- GitTreeState string `json:"gitTreeState"`
- BuildDate string `json:"buildDate"`
- GoVersion string `json:"goVersion"`
- Compiler string `json:"compiler"`
- Platform string `json:"platform"`
-}
-
-// String returns info as a human-friendly version string.
-func (info Info) String() string {
- return info.GitVersion
-}
diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go
deleted file mode 100644
index c70f431c2..000000000
--- a/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package netutil
-
-import (
- "net/url"
- "strings"
-)
-
-// FROM: http://golang.org/src/net/http/client.go
-// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
-// return true if the string includes a port.
-func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
-
-// FROM: http://golang.org/src/net/http/transport.go
-var portMap = map[string]string{
- "http": "80",
- "https": "443",
-}
-
-// FROM: http://golang.org/src/net/http/transport.go
-// CanonicalAddr returns url.Host, but always with a ":port" suffix.
-func CanonicalAddr(url *url.URL) string {
- addr := url.Host
- if !hasPort(addr) {
- return addr + ":" + portMap[url.Scheme]
- }
- return addr
-}
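
For context on the removed helper above, a minimal sketch of CanonicalAddr filling in the scheme's default port; the URL is an illustrative assumption:

    package main

    import (
    	"fmt"
    	"net/url"

    	"k8s.io/apimachinery/third_party/forked/golang/netutil"
    )

    func main() {
    	u, err := url.Parse("https://example.com/healthz")
    	if err != nil {
    		panic(err)
    	}
    	// The host has no explicit port, so ":443" is appended for "https".
    	fmt.Println(netutil.CanonicalAddr(u)) // example.com:443
    }
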
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS
deleted file mode 100644
index e0ec62deb..000000000
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS
+++ /dev/null
@@ -1,9 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-# approval on api packages bubbles to api-approvers
-reviewers:
-- sig-auth-authenticators-approvers
-- sig-auth-authenticators-reviewers
-labels:
-- sig/auth
-
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go
deleted file mode 100644
index b99459757..000000000
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// +k8s:deepcopy-gen=package
-// +groupName=client.authentication.k8s.io
-
-package clientauthentication // import "k8s.io/client-go/pkg/apis/clientauthentication"
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go
deleted file mode 100644
index e4fbc3ea9..000000000
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package clientauthentication
-
-import (
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// GroupName is the group name used in this package
-const GroupName = "client.authentication.k8s.io"
-
-// SchemeGroupVersion is group version used to register these objects
-var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
-
-// Kind takes an unqualified kind and returns a Group qualified GroupKind
-func Kind(kind string) schema.GroupKind {
- return SchemeGroupVersion.WithKind(kind).GroupKind()
-}
-
-// Resource takes an unqualified resource and returns a Group qualified GroupResource
-func Resource(resource string) schema.GroupResource {
- return SchemeGroupVersion.WithResource(resource).GroupResource()
-}
-
-var (
- SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
- AddToScheme = SchemeBuilder.AddToScheme
-)
-
-func addKnownTypes(scheme *runtime.Scheme) error {
- scheme.AddKnownTypes(SchemeGroupVersion,
- &ExecCredential{},
- )
- return nil
-}
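
For context on the registration helpers removed above, a minimal sketch of how AddToScheme is typically consumed, assuming the removed clientauthentication package is still importable; illustrative only:

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/runtime"
    	"k8s.io/client-go/pkg/apis/clientauthentication"
    )

    func main() {
    	scheme := runtime.NewScheme()
    	// AddToScheme registers ExecCredential under the internal group version.
    	if err := clientauthentication.AddToScheme(scheme); err != nil {
    		panic(err)
    	}
    	gvk := clientauthentication.SchemeGroupVersion.WithKind("ExecCredential")
    	fmt.Println(scheme.Recognizes(gvk)) // true
    }
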
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go
deleted file mode 100644
index 6fb53cecf..000000000
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package clientauthentication
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// ExecCredential is used by exec-based plugins to communicate credentials to
-// HTTP transports.
-type ExecCredential struct {
- metav1.TypeMeta
-
- // Spec holds information passed to the plugin by the transport. This contains
- // request and runtime specific information, such as if the session is interactive.
- Spec ExecCredentialSpec
-
- // Status is filled in by the plugin and holds the credentials that the transport
- // should use to contact the API.
- // +optional
- Status *ExecCredentialStatus
-}
-
-// ExecCredentialSpec holds request and runtime specific information provided by
-// the transport.
-type ExecCredentialSpec struct {
- // Response is populated when the transport encounters HTTP status codes, such as 401,
- // suggesting previous credentials were invalid.
- // +optional
- Response *Response
-
- // Interactive is true when the transport detects the command is being called from an
- // interactive prompt.
- // +optional
- Interactive bool
-}
-
-// ExecCredentialStatus holds credentials for the transport to use.
-type ExecCredentialStatus struct {
- // ExpirationTimestamp indicates a time when the provided credentials expire.
- // +optional
- ExpirationTimestamp *metav1.Time
- // Token is a bearer token used by the client for request authentication.
- // +optional
- Token string
- // PEM-encoded client TLS certificate.
- // +optional
- ClientCertificateData string
- // PEM-encoded client TLS private key.
- // +optional
- ClientKeyData string
-}
-
-// Response defines metadata about a failed request, including HTTP status code and
-// response headers.
-type Response struct {
-	// Header holds HTTP headers returned by the server.
- Header map[string][]string
- // Code is the HTTP status code returned by the server.
- Code int32
-}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go
deleted file mode 100644
index 19ab77614..000000000
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// +k8s:deepcopy-gen=package
-// +k8s:conversion-gen=k8s.io/client-go/pkg/apis/clientauthentication
-// +k8s:openapi-gen=true
-// +k8s:defaulter-gen=TypeMeta
-
-// +groupName=client.authentication.k8s.io
-
-package v1alpha1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1"
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go
deleted file mode 100644
index 2acd13dea..000000000
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// GroupName is the group name use in this package
-const GroupName = "client.authentication.k8s.io"
-
-// SchemeGroupVersion is group version used to register these objects
-var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
-
-// Resource takes an unqualified resource and returns a Group qualified GroupResource
-func Resource(resource string) schema.GroupResource {
- return SchemeGroupVersion.WithResource(resource).GroupResource()
-}
-
-var (
- SchemeBuilder runtime.SchemeBuilder
- localSchemeBuilder = &SchemeBuilder
- AddToScheme = localSchemeBuilder.AddToScheme
-)
-
-func init() {
- // We only register manually written functions here. The registration of the
- // generated functions takes place in the generated files. The separation
- // makes the code compile even when the generated files are missing.
- localSchemeBuilder.Register(addKnownTypes)
-}
-
-func addKnownTypes(scheme *runtime.Scheme) error {
- scheme.AddKnownTypes(SchemeGroupVersion,
- &ExecCredential{},
- )
- metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
- return nil
-}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go
deleted file mode 100644
index c714e2457..000000000
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// ExecCredential is used by exec-based plugins to communicate credentials to
-// HTTP transports.
-type ExecCredential struct {
- metav1.TypeMeta `json:",inline"`
-
- // Spec holds information passed to the plugin by the transport. This contains
- // request and runtime specific information, such as if the session is interactive.
- Spec ExecCredentialSpec `json:"spec,omitempty"`
-
- // Status is filled in by the plugin and holds the credentials that the transport
- // should use to contact the API.
- // +optional
- Status *ExecCredentialStatus `json:"status,omitempty"`
-}
-
-// ExecCredentialSpec holds request and runtime specific information provided by
-// the transport.
-type ExecCredentialSpec struct {
- // Response is populated when the transport encounters HTTP status codes, such as 401,
- // suggesting previous credentials were invalid.
- // +optional
- Response *Response `json:"response,omitempty"`
-
- // Interactive is true when the transport detects the command is being called from an
- // interactive prompt.
- // +optional
- Interactive bool `json:"interactive,omitempty"`
-}
-
-// ExecCredentialStatus holds credentials for the transport to use.
-//
-// Token and ClientKeyData are sensitive fields. This data should only be
-// transmitted in-memory between client and exec plugin process. Exec plugin
-// itself should at least be protected via file permissions.
-type ExecCredentialStatus struct {
- // ExpirationTimestamp indicates a time when the provided credentials expire.
- // +optional
- ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"`
- // Token is a bearer token used by the client for request authentication.
- Token string `json:"token,omitempty"`
- // PEM-encoded client TLS certificates (including intermediates, if any).
- ClientCertificateData string `json:"clientCertificateData,omitempty"`
- // PEM-encoded private key for the above certificate.
- ClientKeyData string `json:"clientKeyData,omitempty"`
-}
-
-// Response defines metadata about a failed request, including HTTP status code and
-// response headers.
-type Response struct {
- // Header holds HTTP headers returned by the server.
- Header map[string][]string `json:"header,omitempty"`
- // Code is the HTTP status code returned by the server.
- Code int32 `json:"code,omitempty"`
-}
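
For context on the removed v1alpha1 types above, a minimal sketch of an exec credential plugin writing an ExecCredential to stdout for the client transport to read; the token value is a placeholder assumption:

    package main

    import (
    	"encoding/json"
    	"os"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1"
    )

    func main() {
    	cred := v1alpha1.ExecCredential{
    		TypeMeta: metav1.TypeMeta{
    			APIVersion: "client.authentication.k8s.io/v1alpha1",
    			Kind:       "ExecCredential",
    		},
    		Status: &v1alpha1.ExecCredentialStatus{
    			Token: "example-bearer-token",
    		},
    	}
    	// The transport consumes this JSON from the plugin's stdout.
    	if err := json.NewEncoder(os.Stdout).Encode(&cred); err != nil {
    		os.Exit(1)
    	}
    }
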
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go
deleted file mode 100644
index 461c20b29..000000000
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by conversion-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
- unsafe "unsafe"
-
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- conversion "k8s.io/apimachinery/pkg/conversion"
- runtime "k8s.io/apimachinery/pkg/runtime"
- clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication"
-)
-
-func init() {
- localSchemeBuilder.Register(RegisterConversions)
-}
-
-// RegisterConversions adds conversion functions to the given scheme.
-// Public to allow building arbitrary schemes.
-func RegisterConversions(s *runtime.Scheme) error {
- if err := s.AddGeneratedConversionFunc((*ExecCredential)(nil), (*clientauthentication.ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(a.(*ExecCredential), b.(*clientauthentication.ExecCredential), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredential)(nil), (*ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(a.(*clientauthentication.ExecCredential), b.(*ExecCredential), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*ExecCredentialSpec)(nil), (*clientauthentication.ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(a.(*ExecCredentialSpec), b.(*clientauthentication.ExecCredentialSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialStatus)(nil), (*ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(a.(*clientauthentication.ExecCredentialStatus), b.(*ExecCredentialStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*Response)(nil), (*clientauthentication.Response)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1alpha1_Response_To_clientauthentication_Response(a.(*Response), b.(*clientauthentication.Response), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*clientauthentication.Response)(nil), (*Response)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_clientauthentication_Response_To_v1alpha1_Response(a.(*clientauthentication.Response), b.(*Response), scope)
- }); err != nil {
- return err
- }
- return nil
-}
-
-func autoConvert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error {
- if err := Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- out.Status = (*clientauthentication.ExecCredentialStatus)(unsafe.Pointer(in.Status))
- return nil
-}
-
-// Convert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential is an autogenerated conversion function.
-func Convert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error {
- return autoConvert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(in, out, s)
-}
-
-func autoConvert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error {
- if err := Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- out.Status = (*ExecCredentialStatus)(unsafe.Pointer(in.Status))
- return nil
-}
-
-// Convert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential is an autogenerated conversion function.
-func Convert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error {
- return autoConvert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(in, out, s)
-}
-
-func autoConvert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error {
- out.Response = (*clientauthentication.Response)(unsafe.Pointer(in.Response))
- out.Interactive = in.Interactive
- return nil
-}
-
-// Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec is an autogenerated conversion function.
-func Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error {
- return autoConvert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in, out, s)
-}
-
-func autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
- out.Response = (*Response)(unsafe.Pointer(in.Response))
- out.Interactive = in.Interactive
- return nil
-}
-
-// Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec is an autogenerated conversion function.
-func Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
- return autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in, out, s)
-}
-
-func autoConvert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error {
- out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp))
- out.Token = in.Token
- out.ClientCertificateData = in.ClientCertificateData
- out.ClientKeyData = in.ClientKeyData
- return nil
-}
-
-// Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus is an autogenerated conversion function.
-func Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error {
- return autoConvert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in, out, s)
-}
-
-func autoConvert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error {
- out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp))
- out.Token = in.Token
- out.ClientCertificateData = in.ClientCertificateData
- out.ClientKeyData = in.ClientKeyData
- return nil
-}
-
-// Convert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus is an autogenerated conversion function.
-func Convert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error {
- return autoConvert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(in, out, s)
-}
-
-func autoConvert_v1alpha1_Response_To_clientauthentication_Response(in *Response, out *clientauthentication.Response, s conversion.Scope) error {
- out.Header = *(*map[string][]string)(unsafe.Pointer(&in.Header))
- out.Code = in.Code
- return nil
-}
-
-// Convert_v1alpha1_Response_To_clientauthentication_Response is an autogenerated conversion function.
-func Convert_v1alpha1_Response_To_clientauthentication_Response(in *Response, out *clientauthentication.Response, s conversion.Scope) error {
- return autoConvert_v1alpha1_Response_To_clientauthentication_Response(in, out, s)
-}
-
-func autoConvert_clientauthentication_Response_To_v1alpha1_Response(in *clientauthentication.Response, out *Response, s conversion.Scope) error {
- out.Header = *(*map[string][]string)(unsafe.Pointer(&in.Header))
- out.Code = in.Code
- return nil
-}
-
-// Convert_clientauthentication_Response_To_v1alpha1_Response is an autogenerated conversion function.
-func Convert_clientauthentication_Response_To_v1alpha1_Response(in *clientauthentication.Response, out *Response, s conversion.Scope) error {
- return autoConvert_clientauthentication_Response_To_v1alpha1_Response(in, out, s)
-}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go
deleted file mode 100644
index a73d31b3f..000000000
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
- runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExecCredential) DeepCopyInto(out *ExecCredential) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.Spec.DeepCopyInto(&out.Spec)
- if in.Status != nil {
- in, out := &in.Status, &out.Status
- *out = new(ExecCredentialStatus)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential.
-func (in *ExecCredential) DeepCopy() *ExecCredential {
- if in == nil {
- return nil
- }
- out := new(ExecCredential)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ExecCredential) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) {
- *out = *in
- if in.Response != nil {
- in, out := &in.Response, &out.Response
- *out = new(Response)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec.
-func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec {
- if in == nil {
- return nil
- }
- out := new(ExecCredentialSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) {
- *out = *in
- if in.ExpirationTimestamp != nil {
- in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp
- *out = (*in).DeepCopy()
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus.
-func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus {
- if in == nil {
- return nil
- }
- out := new(ExecCredentialStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Response) DeepCopyInto(out *Response) {
- *out = *in
- if in.Header != nil {
- in, out := &in.Header, &out.Header
- *out = make(map[string][]string, len(*in))
- for key, val := range *in {
- var outVal []string
- if val == nil {
- (*out)[key] = nil
- } else {
- in, out := &val, &outVal
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- (*out)[key] = outVal
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Response.
-func (in *Response) DeepCopy() *Response {
- if in == nil {
- return nil
- }
- out := new(Response)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go
deleted file mode 100644
index dd621a3ac..000000000
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by defaulter-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
- runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// RegisterDefaults adds defaulters functions to the given scheme.
-// Public to allow building arbitrary schemes.
-// All generated defaulters are covering - they call all nested defaulters.
-func RegisterDefaults(scheme *runtime.Scheme) error {
- return nil
-}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go
deleted file mode 100644
index f543806ac..000000000
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
- conversion "k8s.io/apimachinery/pkg/conversion"
- clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication"
-)
-
-func Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
- return nil
-}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go
deleted file mode 100644
index 22d1c588b..000000000
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// +k8s:deepcopy-gen=package
-// +k8s:conversion-gen=k8s.io/client-go/pkg/apis/clientauthentication
-// +k8s:openapi-gen=true
-// +k8s:defaulter-gen=TypeMeta
-
-// +groupName=client.authentication.k8s.io
-
-package v1beta1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go
deleted file mode 100644
index 0bb92f16a..000000000
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// GroupName is the group name used in this package
-const GroupName = "client.authentication.k8s.io"
-
-// SchemeGroupVersion is group version used to register these objects
-var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
-
-// Resource takes an unqualified resource and returns a Group qualified GroupResource
-func Resource(resource string) schema.GroupResource {
- return SchemeGroupVersion.WithResource(resource).GroupResource()
-}
-
-var (
- SchemeBuilder runtime.SchemeBuilder
- localSchemeBuilder = &SchemeBuilder
- AddToScheme = localSchemeBuilder.AddToScheme
-)
-
-func init() {
- // We only register manually written functions here. The registration of the
- // generated functions takes place in the generated files. The separation
- // makes the code compile even when the generated files are missing.
- localSchemeBuilder.Register(addKnownTypes)
-}
-
-func addKnownTypes(scheme *runtime.Scheme) error {
- scheme.AddKnownTypes(SchemeGroupVersion,
- &ExecCredential{},
- )
- metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
- return nil
-}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go
deleted file mode 100644
index d6e267452..000000000
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// ExecCredential is used by exec-based plugins to communicate credentials to
-// HTTP transports.
-type ExecCredential struct {
- metav1.TypeMeta `json:",inline"`
-
- // Spec holds information passed to the plugin by the transport. This contains
- // request and runtime specific information, such as if the session is interactive.
- Spec ExecCredentialSpec `json:"spec,omitempty"`
-
- // Status is filled in by the plugin and holds the credentials that the transport
- // should use to contact the API.
- // +optional
- Status *ExecCredentialStatus `json:"status,omitempty"`
-}
-
-// ExecCredentialSpec holds request and runtime specific information provided by
-// the transport.
-type ExecCredentialSpec struct{}
-
-// ExecCredentialStatus holds credentials for the transport to use.
-//
-// Token and ClientKeyData are sensitive fields. This data should only be
-// transmitted in-memory between client and exec plugin process. Exec plugin
-// itself should at least be protected via file permissions.
-type ExecCredentialStatus struct {
- // ExpirationTimestamp indicates a time when the provided credentials expire.
- // +optional
- ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"`
- // Token is a bearer token used by the client for request authentication.
- Token string `json:"token,omitempty"`
- // PEM-encoded client TLS certificates (including intermediates, if any).
- ClientCertificateData string `json:"clientCertificateData,omitempty"`
- // PEM-encoded private key for the above certificate.
- ClientKeyData string `json:"clientKeyData,omitempty"`
-}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go
deleted file mode 100644
index 94ef4b733..000000000
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by conversion-gen. DO NOT EDIT.
-
-package v1beta1
-
-import (
- unsafe "unsafe"
-
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- conversion "k8s.io/apimachinery/pkg/conversion"
- runtime "k8s.io/apimachinery/pkg/runtime"
- clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication"
-)
-
-func init() {
- localSchemeBuilder.Register(RegisterConversions)
-}
-
-// RegisterConversions adds conversion functions to the given scheme.
-// Public to allow building arbitrary schemes.
-func RegisterConversions(s *runtime.Scheme) error {
- if err := s.AddGeneratedConversionFunc((*ExecCredential)(nil), (*clientauthentication.ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(a.(*ExecCredential), b.(*clientauthentication.ExecCredential), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredential)(nil), (*ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(a.(*clientauthentication.ExecCredential), b.(*ExecCredential), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*ExecCredentialSpec)(nil), (*clientauthentication.ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(a.(*ExecCredentialSpec), b.(*clientauthentication.ExecCredentialSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialStatus)(nil), (*ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(a.(*clientauthentication.ExecCredentialStatus), b.(*ExecCredentialStatus), scope)
- }); err != nil {
- return err
- }
- if err := s.AddConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
- return Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope)
- }); err != nil {
- return err
- }
- return nil
-}
-
-func autoConvert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error {
- if err := Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- out.Status = (*clientauthentication.ExecCredentialStatus)(unsafe.Pointer(in.Status))
- return nil
-}
-
-// Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential is an autogenerated conversion function.
-func Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error {
- return autoConvert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in, out, s)
-}
-
-func autoConvert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error {
- if err := Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil {
- return err
- }
- out.Status = (*ExecCredentialStatus)(unsafe.Pointer(in.Status))
- return nil
-}
-
-// Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential is an autogenerated conversion function.
-func Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error {
- return autoConvert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in, out, s)
-}
-
-func autoConvert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error {
- return nil
-}
-
-// Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec is an autogenerated conversion function.
-func Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error {
- return autoConvert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in, out, s)
-}
-
-func autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
- // WARNING: in.Response requires manual conversion: does not exist in peer-type
- // WARNING: in.Interactive requires manual conversion: does not exist in peer-type
- return nil
-}
-
-func autoConvert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error {
- out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp))
- out.Token = in.Token
- out.ClientCertificateData = in.ClientCertificateData
- out.ClientKeyData = in.ClientKeyData
- return nil
-}
-
-// Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus is an autogenerated conversion function.
-func Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error {
- return autoConvert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in, out, s)
-}
-
-func autoConvert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error {
- out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp))
- out.Token = in.Token
- out.ClientCertificateData = in.ClientCertificateData
- out.ClientKeyData = in.ClientKeyData
- return nil
-}
-
-// Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus is an autogenerated conversion function.
-func Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error {
- return autoConvert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in, out, s)
-}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go
deleted file mode 100644
index 736b8cf00..000000000
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package v1beta1
-
-import (
- runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExecCredential) DeepCopyInto(out *ExecCredential) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- out.Spec = in.Spec
- if in.Status != nil {
- in, out := &in.Status, &out.Status
- *out = new(ExecCredentialStatus)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential.
-func (in *ExecCredential) DeepCopy() *ExecCredential {
- if in == nil {
- return nil
- }
- out := new(ExecCredential)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ExecCredential) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec.
-func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec {
- if in == nil {
- return nil
- }
- out := new(ExecCredentialSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) {
- *out = *in
- if in.ExpirationTimestamp != nil {
- in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp
- *out = (*in).DeepCopy()
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus.
-func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus {
- if in == nil {
- return nil
- }
- out := new(ExecCredentialStatus)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go
deleted file mode 100644
index 73e63fc11..000000000
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by defaulter-gen. DO NOT EDIT.
-
-package v1beta1
-
-import (
- runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// RegisterDefaults adds defaulter functions to the given scheme.
-// Public to allow building arbitrary schemes.
-// All generated defaulters are covering - they call all nested defaulters.
-func RegisterDefaults(scheme *runtime.Scheme) error {
- return nil
-}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go
deleted file mode 100644
index c568a6fc8..000000000
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package clientauthentication
-
-import (
- runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExecCredential) DeepCopyInto(out *ExecCredential) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.Spec.DeepCopyInto(&out.Spec)
- if in.Status != nil {
- in, out := &in.Status, &out.Status
- *out = new(ExecCredentialStatus)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential.
-func (in *ExecCredential) DeepCopy() *ExecCredential {
- if in == nil {
- return nil
- }
- out := new(ExecCredential)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ExecCredential) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) {
- *out = *in
- if in.Response != nil {
- in, out := &in.Response, &out.Response
- *out = new(Response)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec.
-func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec {
- if in == nil {
- return nil
- }
- out := new(ExecCredentialSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) {
- *out = *in
- if in.ExpirationTimestamp != nil {
- in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp
- *out = (*in).DeepCopy()
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus.
-func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus {
- if in == nil {
- return nil
- }
- out := new(ExecCredentialStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Response) DeepCopyInto(out *Response) {
- *out = *in
- if in.Header != nil {
- in, out := &in.Header, &out.Header
- *out = make(map[string][]string, len(*in))
- for key, val := range *in {
- var outVal []string
- if val == nil {
- (*out)[key] = nil
- } else {
- in, out := &val, &outVal
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- (*out)[key] = outVal
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Response.
-func (in *Response) DeepCopy() *Response {
- if in == nil {
- return nil
- }
- out := new(Response)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/vendor/k8s.io/client-go/pkg/version/.gitattributes b/vendor/k8s.io/client-go/pkg/version/.gitattributes
deleted file mode 100644
index 7e349eff6..000000000
--- a/vendor/k8s.io/client-go/pkg/version/.gitattributes
+++ /dev/null
@@ -1 +0,0 @@
-base.go export-subst
diff --git a/vendor/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go
deleted file mode 100644
index 9b4c79f89..000000000
--- a/vendor/k8s.io/client-go/pkg/version/base.go
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package version
-
-// Base version information.
-//
-// This is the fallback data used when version information from git is not
-// provided via go ldflags. It provides an approximation of the Kubernetes
-// version for ad-hoc builds (e.g. `go build`) that cannot get the version
-// information from git.
-//
-// If you are looking at these fields in the git tree, they look
-// strange. They are modified on the fly by the build process. The
-// in-tree values are dummy values used for "git archive", which also
-// works for GitHub tar downloads.
-//
-// When releasing a new Kubernetes version, this file is updated by
-// build/mark_new_version.sh to reflect the new version, and then a
-// git annotated tag (using format vX.Y where X == Major version and Y
-// == Minor version) is created to point to the commit that updates
-// pkg/version/base.go
-var (
- // TODO: Deprecate gitMajor and gitMinor, use only gitVersion
- // instead. First step in deprecation, keep the fields but make
- // them irrelevant. (Next we'll take it out, which may muck with
- // scripts consuming the kubectl version output - but most of
- // these should be looking at gitVersion already anyways.)
- gitMajor string = "" // major version, always numeric
- gitMinor string = "" // minor version, numeric possibly followed by "+"
-
- // semantic version, derived by build scripts (see
- // https://git.k8s.io/community/contributors/design-proposals/release/versioning.md
- // for a detailed discussion of this field)
- //
- // TODO: This field is still called "gitVersion" for legacy
- // reasons. For prerelease versions, the build metadata on the
- // semantic version is a git hash, but the version itself is no
- // longer the direct output of "git describe", but a slight
- // translation to be semver compliant.
-
- // NOTE: The $Format strings are replaced during 'git archive' thanks to the
- // companion .gitattributes file containing 'export-subst' in this same
- // directory. See also https://git-scm.com/docs/gitattributes
- gitVersion string = "v0.0.0-master+$Format:%h$"
- gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
- gitTreeState string = "" // state of git tree, either "clean" or "dirty"
-
- buildDate string = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
-)
diff --git a/vendor/k8s.io/client-go/pkg/version/def.bzl b/vendor/k8s.io/client-go/pkg/version/def.bzl
deleted file mode 100644
index 9c018a4ef..000000000
--- a/vendor/k8s.io/client-go/pkg/version/def.bzl
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2017 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Implements hack/lib/version.sh's kube::version::ldflags() for Bazel.
-def version_x_defs():
- # This should match the list of packages in kube::version::ldflag
- stamp_pkgs = [
- "k8s.io/kubernetes/pkg/version",
- # In hack/lib/version.sh, this has a vendor/ prefix. That isn't needed here?
- "k8s.io/client-go/pkg/version",
- ]
- # This should match the list of vars in kube::version::ldflags
- # It should also match the list of vars set in hack/print-workspace-status.sh.
- stamp_vars = [
- "buildDate",
- "gitCommit",
- "gitMajor",
- "gitMinor",
- "gitTreeState",
- "gitVersion",
- ]
- # Generate the cross-product.
- x_defs = {}
- for pkg in stamp_pkgs:
- for var in stamp_vars:
- x_defs["%s.%s" % (pkg, var)] = "{%s}" % var
- return x_defs
diff --git a/vendor/k8s.io/client-go/pkg/version/doc.go b/vendor/k8s.io/client-go/pkg/version/doc.go
deleted file mode 100644
index 05e997e13..000000000
--- a/vendor/k8s.io/client-go/pkg/version/doc.go
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// +k8s:openapi-gen=true
-
-// Package version supplies version information collected at build time to
-// kubernetes components.
-package version // import "k8s.io/client-go/pkg/version"
diff --git a/vendor/k8s.io/client-go/pkg/version/version.go b/vendor/k8s.io/client-go/pkg/version/version.go
deleted file mode 100644
index 8c8350d13..000000000
--- a/vendor/k8s.io/client-go/pkg/version/version.go
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package version
-
-import (
- "fmt"
- "runtime"
-
- apimachineryversion "k8s.io/apimachinery/pkg/version"
-)
-
-// Get returns the overall codebase version. It's for detecting
-// what code a binary was built from.
-func Get() apimachineryversion.Info {
- // These variables typically come from -ldflags settings and in
- // their absence fallback to the settings in pkg/version/base.go
- return apimachineryversion.Info{
- Major: gitMajor,
- Minor: gitMinor,
- GitVersion: gitVersion,
- GitCommit: gitCommit,
- GitTreeState: gitTreeState,
- BuildDate: buildDate,
- GoVersion: runtime.Version(),
- Compiler: runtime.Compiler,
- Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
- }
-}
diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
deleted file mode 100644
index b88902c10..000000000
--- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package exec
-
-import (
- "bytes"
- "context"
- "crypto/tls"
- "errors"
- "fmt"
- "io"
- "net"
- "net/http"
- "os"
- "os/exec"
- "reflect"
- "sync"
- "time"
-
- "github.com/davecgh/go-spew/spew"
- "golang.org/x/crypto/ssh/terminal"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/runtime/serializer"
- utilruntime "k8s.io/apimachinery/pkg/util/runtime"
- "k8s.io/client-go/pkg/apis/clientauthentication"
- "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1"
- "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
- "k8s.io/client-go/tools/clientcmd/api"
- "k8s.io/client-go/transport"
- "k8s.io/client-go/util/connrotation"
- "k8s.io/klog"
-)
-
-const execInfoEnv = "KUBERNETES_EXEC_INFO"
-
-var scheme = runtime.NewScheme()
-var codecs = serializer.NewCodecFactory(scheme)
-
-func init() {
- v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
- utilruntime.Must(v1alpha1.AddToScheme(scheme))
- utilruntime.Must(v1beta1.AddToScheme(scheme))
- utilruntime.Must(clientauthentication.AddToScheme(scheme))
-}
-
-var (
- // Since transports can be constantly re-initialized by programs like kubectl,
- // keep a cache of initialized authenticators keyed by a hash of their config.
- globalCache = newCache()
- // The list of API versions we accept.
- apiVersions = map[string]schema.GroupVersion{
- v1alpha1.SchemeGroupVersion.String(): v1alpha1.SchemeGroupVersion,
- v1beta1.SchemeGroupVersion.String(): v1beta1.SchemeGroupVersion,
- }
-)
-
-func newCache() *cache {
- return &cache{m: make(map[string]*Authenticator)}
-}
-
-var spewConfig = &spew.ConfigState{DisableMethods: true, Indent: " "}
-
-func cacheKey(c *api.ExecConfig) string {
- return spewConfig.Sprint(c)
-}
-
-type cache struct {
- mu sync.Mutex
- m map[string]*Authenticator
-}
-
-func (c *cache) get(s string) (*Authenticator, bool) {
- c.mu.Lock()
- defer c.mu.Unlock()
- a, ok := c.m[s]
- return a, ok
-}
-
-// put inserts an authenticator into the cache. If an authenticator is already
-// associated with the key, the first one is returned instead.
-func (c *cache) put(s string, a *Authenticator) *Authenticator {
- c.mu.Lock()
- defer c.mu.Unlock()
- existing, ok := c.m[s]
- if ok {
- return existing
- }
- c.m[s] = a
- return a
-}
-
-// GetAuthenticator returns an exec-based plugin for providing client credentials.
-func GetAuthenticator(config *api.ExecConfig) (*Authenticator, error) {
- return newAuthenticator(globalCache, config)
-}
-
-func newAuthenticator(c *cache, config *api.ExecConfig) (*Authenticator, error) {
- key := cacheKey(config)
- if a, ok := c.get(key); ok {
- return a, nil
- }
-
- gv, ok := apiVersions[config.APIVersion]
- if !ok {
- return nil, fmt.Errorf("exec plugin: invalid apiVersion %q", config.APIVersion)
- }
-
- a := &Authenticator{
- cmd: config.Command,
- args: config.Args,
- group: gv,
-
- stdin: os.Stdin,
- stderr: os.Stderr,
- interactive: terminal.IsTerminal(int(os.Stdout.Fd())),
- now: time.Now,
- environ: os.Environ,
- }
-
- for _, env := range config.Env {
- a.env = append(a.env, env.Name+"="+env.Value)
- }
-
- return c.put(key, a), nil
-}
-
-// Authenticator is a client credential provider that rotates credentials by executing a plugin.
-// The plugin input and output are defined by the API group client.authentication.k8s.io.
-type Authenticator struct {
- // Set by the config
- cmd string
- args []string
- group schema.GroupVersion
- env []string
-
- // Stubbable for testing
- stdin io.Reader
- stderr io.Writer
- interactive bool
- now func() time.Time
- environ func() []string
-
- // Cached results.
- //
- // The mutex also guards calling the plugin. Since the plugin could be
- // interactive we want to make sure it's only called once.
- mu sync.Mutex
- cachedCreds *credentials
- exp time.Time
-
- onRotate func()
-}
-
-type credentials struct {
- token string
- cert *tls.Certificate
-}
-
-// UpdateTransportConfig updates the transport.Config to use credentials
-// returned by the plugin.
-func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error {
- c.Wrap(func(rt http.RoundTripper) http.RoundTripper {
- return &roundTripper{a, rt}
- })
-
- if c.TLS.GetCert != nil {
- return errors.New("can't add TLS certificate callback: transport.Config.TLS.GetCert already set")
- }
- c.TLS.GetCert = a.cert
-
- var dial func(ctx context.Context, network, addr string) (net.Conn, error)
- if c.Dial != nil {
- dial = c.Dial
- } else {
- dial = (&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}).DialContext
- }
- d := connrotation.NewDialer(dial)
- a.onRotate = d.CloseAll
- c.Dial = d.DialContext
-
- return nil
-}
-
-type roundTripper struct {
- a *Authenticator
- base http.RoundTripper
-}
-
-func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
- // If a user has already set credentials, use that. This makes commands like
- // "kubectl get --token (token) pods" work.
- if req.Header.Get("Authorization") != "" {
- return r.base.RoundTrip(req)
- }
-
- creds, err := r.a.getCreds()
- if err != nil {
- return nil, fmt.Errorf("getting credentials: %v", err)
- }
- if creds.token != "" {
- req.Header.Set("Authorization", "Bearer "+creds.token)
- }
-
- res, err := r.base.RoundTrip(req)
- if err != nil {
- return nil, err
- }
- if res.StatusCode == http.StatusUnauthorized {
- resp := &clientauthentication.Response{
- Header: res.Header,
- Code: int32(res.StatusCode),
- }
- if err := r.a.maybeRefreshCreds(creds, resp); err != nil {
- klog.Errorf("refreshing credentials: %v", err)
- }
- }
- return res, nil
-}
-
-func (a *Authenticator) credsExpired() bool {
- if a.exp.IsZero() {
- return false
- }
- return a.now().After(a.exp)
-}
-
-func (a *Authenticator) cert() (*tls.Certificate, error) {
- creds, err := a.getCreds()
- if err != nil {
- return nil, err
- }
- return creds.cert, nil
-}
-
-func (a *Authenticator) getCreds() (*credentials, error) {
- a.mu.Lock()
- defer a.mu.Unlock()
- if a.cachedCreds != nil && !a.credsExpired() {
- return a.cachedCreds, nil
- }
-
- if err := a.refreshCredsLocked(nil); err != nil {
- return nil, err
- }
- return a.cachedCreds, nil
-}
-
-// maybeRefreshCreds executes the plugin to force a rotation of the
-// credentials, unless they were rotated already.
-func (a *Authenticator) maybeRefreshCreds(creds *credentials, r *clientauthentication.Response) error {
- a.mu.Lock()
- defer a.mu.Unlock()
-
- // Since we're not making a new pointer to a.cachedCreds in getCreds, no
- // need to do deep comparison.
- if creds != a.cachedCreds {
- // Credentials already rotated.
- return nil
- }
-
- return a.refreshCredsLocked(r)
-}
-
-// refreshCredsLocked executes the plugin and reads the credentials from
-// stdout. It must be called while holding the Authenticator's mutex.
-func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) error {
- cred := &clientauthentication.ExecCredential{
- Spec: clientauthentication.ExecCredentialSpec{
- Response: r,
- Interactive: a.interactive,
- },
- }
-
- env := append(a.environ(), a.env...)
- if a.group == v1alpha1.SchemeGroupVersion {
- // Input spec disabled for beta due to lack of use. Possibly re-enable this later if
- // someone wants it back.
- //
- // See: https://github.com/kubernetes/kubernetes/issues/61796
- data, err := runtime.Encode(codecs.LegacyCodec(a.group), cred)
- if err != nil {
- return fmt.Errorf("encode ExecCredentials: %v", err)
- }
- env = append(env, fmt.Sprintf("%s=%s", execInfoEnv, data))
- }
-
- stdout := &bytes.Buffer{}
- cmd := exec.Command(a.cmd, a.args...)
- cmd.Env = env
- cmd.Stderr = a.stderr
- cmd.Stdout = stdout
- if a.interactive {
- cmd.Stdin = a.stdin
- }
-
- if err := cmd.Run(); err != nil {
- return fmt.Errorf("exec: %v", err)
- }
-
- _, gvk, err := codecs.UniversalDecoder(a.group).Decode(stdout.Bytes(), nil, cred)
- if err != nil {
- return fmt.Errorf("decoding stdout: %v", err)
- }
- if gvk.Group != a.group.Group || gvk.Version != a.group.Version {
- return fmt.Errorf("exec plugin is configured to use API version %s, plugin returned version %s",
- a.group, schema.GroupVersion{Group: gvk.Group, Version: gvk.Version})
- }
-
- if cred.Status == nil {
- return fmt.Errorf("exec plugin didn't return a status field")
- }
- if cred.Status.Token == "" && cred.Status.ClientCertificateData == "" && cred.Status.ClientKeyData == "" {
- return fmt.Errorf("exec plugin didn't return a token or cert/key pair")
- }
- if (cred.Status.ClientCertificateData == "") != (cred.Status.ClientKeyData == "") {
- return fmt.Errorf("exec plugin returned only certificate or key, not both")
- }
-
- if cred.Status.ExpirationTimestamp != nil {
- a.exp = cred.Status.ExpirationTimestamp.Time
- } else {
- a.exp = time.Time{}
- }
-
- newCreds := &credentials{
- token: cred.Status.Token,
- }
- if cred.Status.ClientKeyData != "" && cred.Status.ClientCertificateData != "" {
- cert, err := tls.X509KeyPair([]byte(cred.Status.ClientCertificateData), []byte(cred.Status.ClientKeyData))
- if err != nil {
- return fmt.Errorf("failed parsing client key/certificate: %v", err)
- }
- newCreds.cert = &cert
- }
-
- oldCreds := a.cachedCreds
- a.cachedCreds = newCreds
- // Only close all connections when TLS cert rotates. Token rotation doesn't
- // need the extra noise.
- if a.onRotate != nil && oldCreds != nil && !reflect.DeepEqual(oldCreds.cert, a.cachedCreds.cert) {
- a.onRotate()
- }
- return nil
-}
diff --git a/vendor/k8s.io/client-go/rest/OWNERS b/vendor/k8s.io/client-go/rest/OWNERS
deleted file mode 100644
index 49dabc61b..000000000
--- a/vendor/k8s.io/client-go/rest/OWNERS
+++ /dev/null
@@ -1,26 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-reviewers:
-- thockin
-- smarterclayton
-- caesarxuchao
-- wojtek-t
-- deads2k
-- brendandburns
-- liggitt
-- nikhiljindal
-- gmarek
-- erictune
-- sttts
-- luxas
-- dims
-- errordeveloper
-- hongchaodeng
-- krousey
-- resouer
-- cjcullen
-- rmmh
-- lixiaobing10051267
-- asalkeld
-- juanvallejo
-- lojies
diff --git a/vendor/k8s.io/client-go/rest/client.go b/vendor/k8s.io/client-go/rest/client.go
deleted file mode 100644
index 927403cb2..000000000
--- a/vendor/k8s.io/client-go/rest/client.go
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package rest
-
-import (
- "fmt"
- "mime"
- "net/http"
- "net/url"
- "os"
- "strconv"
- "strings"
- "time"
-
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/client-go/util/flowcontrol"
-)
-
-const (
- // Environment variables: Note that the duration should be long enough that the backoff
- // persists for some reasonable time (i.e. 120 seconds). The typical base might be "1".
- envBackoffBase = "KUBE_CLIENT_BACKOFF_BASE"
- envBackoffDuration = "KUBE_CLIENT_BACKOFF_DURATION"
-)
-
-// Interface captures the set of operations for generically interacting with Kubernetes REST apis.
-type Interface interface {
- GetRateLimiter() flowcontrol.RateLimiter
- Verb(verb string) *Request
- Post() *Request
- Put() *Request
- Patch(pt types.PatchType) *Request
- Get() *Request
- Delete() *Request
- APIVersion() schema.GroupVersion
-}
-
-// RESTClient imposes common Kubernetes API conventions on a set of resource paths.
-// The baseURL is expected to point to an HTTP or HTTPS path that is the parent
-// of one or more resources. The server should return a decodable API resource
-// object, or an api.Status object which contains information about the reason for
-// any failure.
-//
-// Most consumers should use client.New() to get a Kubernetes API client.
-type RESTClient struct {
- // base is the root URL for all invocations of the client
- base *url.URL
- // versionedAPIPath is a path segment connecting the base URL to the resource root
- versionedAPIPath string
-
- // contentConfig is the information used to communicate with the server.
- contentConfig ContentConfig
-
- // serializers contain all serializers for underlying content type.
- serializers Serializers
-
- // creates BackoffManager that is passed to requests.
- createBackoffMgr func() BackoffManager
-
- // TODO extract this into a wrapper interface via the RESTClient interface in kubectl.
- Throttle flowcontrol.RateLimiter
-
- // Set specific behavior of the client. If not set http.DefaultClient will be used.
- Client *http.Client
-}
-
-type Serializers struct {
- Encoder runtime.Encoder
- Decoder runtime.Decoder
- StreamingSerializer runtime.Serializer
- Framer runtime.Framer
- RenegotiatedDecoder func(contentType string, params map[string]string) (runtime.Decoder, error)
-}
-
-// NewRESTClient creates a new RESTClient. This client performs generic REST functions
-// such as Get, Put, Post, and Delete on specified paths. Codec controls encoding and
-// decoding of responses from the server.
-func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConfig, maxQPS float32, maxBurst int, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) {
- base := *baseURL
- if !strings.HasSuffix(base.Path, "/") {
- base.Path += "/"
- }
- base.RawQuery = ""
- base.Fragment = ""
-
- if config.GroupVersion == nil {
- config.GroupVersion = &schema.GroupVersion{}
- }
- if len(config.ContentType) == 0 {
- config.ContentType = "application/json"
- }
- serializers, err := createSerializers(config)
- if err != nil {
- return nil, err
- }
-
- var throttle flowcontrol.RateLimiter
- if maxQPS > 0 && rateLimiter == nil {
- throttle = flowcontrol.NewTokenBucketRateLimiter(maxQPS, maxBurst)
- } else if rateLimiter != nil {
- throttle = rateLimiter
- }
- return &RESTClient{
- base: &base,
- versionedAPIPath: versionedAPIPath,
- contentConfig: config,
- serializers: *serializers,
- createBackoffMgr: readExpBackoffConfig,
- Throttle: throttle,
- Client: client,
- }, nil
-}
-
-// GetRateLimiter returns the rate limiter for a given client, or nil if it's called on a nil client
-func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter {
- if c == nil {
- return nil
- }
- return c.Throttle
-}
-
-// readExpBackoffConfig handles the internal logic of determining what the
-// backoff policy is. By default, if no information is available, NoBackoff is used.
-// TODO Generalize this see #17727 .
-func readExpBackoffConfig() BackoffManager {
- backoffBase := os.Getenv(envBackoffBase)
- backoffDuration := os.Getenv(envBackoffDuration)
-
- backoffBaseInt, errBase := strconv.ParseInt(backoffBase, 10, 64)
- backoffDurationInt, errDuration := strconv.ParseInt(backoffDuration, 10, 64)
- if errBase != nil || errDuration != nil {
- return &NoBackoff{}
- }
- return &URLBackoff{
- Backoff: flowcontrol.NewBackOff(
- time.Duration(backoffBaseInt)*time.Second,
- time.Duration(backoffDurationInt)*time.Second)}
-}
-
-// createSerializers creates all necessary serializers for given contentType.
-// TODO: the negotiated serializer passed to this method should probably return
-// serializers that control decoding and versioning without this package
-// being aware of the types. Depends on whether RESTClient must deal with
-// generic infrastructure.
-func createSerializers(config ContentConfig) (*Serializers, error) {
- mediaTypes := config.NegotiatedSerializer.SupportedMediaTypes()
- contentType := config.ContentType
- mediaType, _, err := mime.ParseMediaType(contentType)
- if err != nil {
- return nil, fmt.Errorf("the content type specified in the client configuration is not recognized: %v", err)
- }
- info, ok := runtime.SerializerInfoForMediaType(mediaTypes, mediaType)
- if !ok {
- if len(contentType) != 0 || len(mediaTypes) == 0 {
- return nil, fmt.Errorf("no serializers registered for %s", contentType)
- }
- info = mediaTypes[0]
- }
-
- internalGV := schema.GroupVersions{
- {
- Group: config.GroupVersion.Group,
- Version: runtime.APIVersionInternal,
- },
- // always include the legacy group as a decoding target to handle non-error `Status` return types
- {
- Group: "",
- Version: runtime.APIVersionInternal,
- },
- }
-
- s := &Serializers{
- Encoder: config.NegotiatedSerializer.EncoderForVersion(info.Serializer, *config.GroupVersion),
- Decoder: config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV),
-
- RenegotiatedDecoder: func(contentType string, params map[string]string) (runtime.Decoder, error) {
- info, ok := runtime.SerializerInfoForMediaType(mediaTypes, contentType)
- if !ok {
- return nil, fmt.Errorf("serializer for %s not registered", contentType)
- }
- return config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV), nil
- },
- }
- if info.StreamSerializer != nil {
- s.StreamingSerializer = info.StreamSerializer.Serializer
- s.Framer = info.StreamSerializer.Framer
- }
-
- return s, nil
-}
-
-// Verb begins a request with a verb (GET, POST, PUT, DELETE).
-//
-// Example usage of RESTClient's request building interface:
-// c, err := NewRESTClient(...)
-// if err != nil { ... }
-// resp, err := c.Verb("GET").
-// Path("pods").
-// SelectorParam("labels", "area=staging").
-// Timeout(10*time.Second).
-// Do()
-// if err != nil { ... }
-// list, ok := resp.(*api.PodList)
-//
-func (c *RESTClient) Verb(verb string) *Request {
- backoff := c.createBackoffMgr()
-
- if c.Client == nil {
- return NewRequest(nil, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle, 0)
- }
- return NewRequest(c.Client, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle, c.Client.Timeout)
-}
-
-// Post begins a POST request. Short for c.Verb("POST").
-func (c *RESTClient) Post() *Request {
- return c.Verb("POST")
-}
-
-// Put begins a PUT request. Short for c.Verb("PUT").
-func (c *RESTClient) Put() *Request {
- return c.Verb("PUT")
-}
-
-// Patch begins a PATCH request. Short for c.Verb("PATCH").
-func (c *RESTClient) Patch(pt types.PatchType) *Request {
- return c.Verb("PATCH").SetHeader("Content-Type", string(pt))
-}
-
-// Get begins a GET request. Short for c.Verb("GET").
-func (c *RESTClient) Get() *Request {
- return c.Verb("GET")
-}
-
-// Delete begins a DELETE request. Short for c.Verb("DELETE").
-func (c *RESTClient) Delete() *Request {
- return c.Verb("DELETE")
-}
-
-// APIVersion returns the APIVersion this RESTClient is expected to use.
-func (c *RESTClient) APIVersion() schema.GroupVersion {
- return *c.contentConfig.GroupVersion
-}
diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go
deleted file mode 100644
index c75825ec5..000000000
--- a/vendor/k8s.io/client-go/rest/config.go
+++ /dev/null
@@ -1,549 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package rest
-
-import (
- "context"
- "errors"
- "fmt"
- "io/ioutil"
- "net"
- "net/http"
- "os"
- "path/filepath"
- gruntime "runtime"
- "strings"
- "time"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/client-go/pkg/version"
- clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
- "k8s.io/client-go/transport"
- certutil "k8s.io/client-go/util/cert"
- "k8s.io/client-go/util/flowcontrol"
- "k8s.io/klog"
-)
-
-const (
- DefaultQPS float32 = 5.0
- DefaultBurst int = 10
-)
-
-var ErrNotInCluster = errors.New("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined")
-
-// Config holds the common attributes that can be passed to a Kubernetes client on
-// initialization.
-type Config struct {
- // Host must be a host string, a host:port pair, or a URL to the base of the apiserver.
- // If a URL is given then the (optional) Path of that URL represents a prefix that must
- // be appended to all request URIs used to access the apiserver. This allows a frontend
- // proxy to easily relocate all of the apiserver endpoints.
- Host string
- // APIPath is a sub-path that points to an API root.
- APIPath string
-
- // ContentConfig contains settings that affect how objects are transformed when
- // sent to the server.
- ContentConfig
-
- // Server requires Basic authentication
- Username string
- Password string
-
- // Server requires Bearer authentication. This client will not attempt to use
- // refresh tokens for an OAuth2 flow.
- // TODO: demonstrate an OAuth2 compatible client.
- BearerToken string
-
- // Path to a file containing a BearerToken.
- // If set, the contents are periodically read.
- // The last successfully read value takes precedence over BearerToken.
- BearerTokenFile string
-
- // Impersonate is the configuration that RESTClient will use for impersonation.
- Impersonate ImpersonationConfig
-
- // Server requires plugin-specified authentication.
- AuthProvider *clientcmdapi.AuthProviderConfig
-
- // Callback to persist config for AuthProvider.
- AuthConfigPersister AuthProviderConfigPersister
-
- // Exec-based authentication provider.
- ExecProvider *clientcmdapi.ExecConfig
-
- // TLSClientConfig contains settings to enable transport layer security
- TLSClientConfig
-
- // UserAgent is an optional field that specifies the caller of this request.
- UserAgent string
-
- // Transport may be used for custom HTTP behavior. This attribute may not
- // be specified with the TLS client certificate options. Use WrapTransport
- // to provide additional per-server middleware behavior.
- Transport http.RoundTripper
- // WrapTransport will be invoked for custom HTTP behavior after the underlying
- // transport is initialized (either the transport created from TLSClientConfig,
- // Transport, or http.DefaultTransport). The config may layer other RoundTrippers
- // on top of the returned RoundTripper.
- //
- // A future release will change this field to an array. Use config.Wrap()
- // instead of setting this value directly.
- WrapTransport transport.WrapperFunc
-
- // QPS indicates the maximum QPS to the master from this client.
- // If it's zero, the created RESTClient will use DefaultQPS: 5
- QPS float32
-
- // Maximum burst for throttle.
- // If it's zero, the created RESTClient will use DefaultBurst: 10.
- Burst int
-
- // Rate limiter for limiting connections to the master from this client. If present overwrites QPS/Burst
- RateLimiter flowcontrol.RateLimiter
-
- // The maximum length of time to wait before giving up on a server request. A value of zero means no timeout.
- Timeout time.Duration
-
- // Dial specifies the dial function for creating unencrypted TCP connections.
- Dial func(ctx context.Context, network, address string) (net.Conn, error)
-
- // Version forces a specific version to be used (if registered)
- // Do we need this?
- // Version string
-}
-
-var _ fmt.Stringer = new(Config)
-var _ fmt.GoStringer = new(Config)
-
-type sanitizedConfig *Config
-
-type sanitizedAuthConfigPersister struct{ AuthProviderConfigPersister }
-
-func (sanitizedAuthConfigPersister) GoString() string {
- return "rest.AuthProviderConfigPersister(--- REDACTED ---)"
-}
-func (sanitizedAuthConfigPersister) String() string {
- return "rest.AuthProviderConfigPersister(--- REDACTED ---)"
-}
-
-// GoString implements fmt.GoStringer and sanitizes sensitive fields of Config
-// to prevent accidental leaking via logs.
-func (c *Config) GoString() string {
- return c.String()
-}
-
-// String implements fmt.Stringer and sanitizes sensitive fields of Config to
-// prevent accidental leaking via logs.
-func (c *Config) String() string {
- if c == nil {
- return "<nil>"
- }
- cc := sanitizedConfig(CopyConfig(c))
- // Explicitly mark non-empty credential fields as redacted.
- if cc.Password != "" {
- cc.Password = "--- REDACTED ---"
- }
- if cc.BearerToken != "" {
- cc.BearerToken = "--- REDACTED ---"
- }
- if cc.AuthConfigPersister != nil {
- cc.AuthConfigPersister = sanitizedAuthConfigPersister{cc.AuthConfigPersister}
- }
-
- return fmt.Sprintf("%#v", cc)
-}
-
-// ImpersonationConfig has all the available impersonation options
-type ImpersonationConfig struct {
- // UserName is the username to impersonate on each request.
- UserName string
- // Groups are the groups to impersonate on each request.
- Groups []string
- // Extra is a free-form field which can be used to link some authentication information
- // to authorization information. This field allows you to impersonate it.
- Extra map[string][]string
-}
-
-// +k8s:deepcopy-gen=true
-// TLSClientConfig contains settings to enable transport layer security
-type TLSClientConfig struct {
- // Server should be accessed without verifying the TLS certificate. For testing only.
- Insecure bool
- // ServerName is passed to the server for SNI and is used in the client to check server
- // certificates against. If ServerName is empty, the hostname used to contact the
- // server is used.
- ServerName string
-
- // Server requires TLS client certificate authentication
- CertFile string
- // Server requires TLS client certificate authentication
- KeyFile string
- // Trusted root certificates for server
- CAFile string
-
- // CertData holds PEM-encoded bytes (typically read from a client certificate file).
- // CertData takes precedence over CertFile
- CertData []byte
- // KeyData holds PEM-encoded bytes (typically read from a client certificate key file).
- // KeyData takes precedence over KeyFile
- KeyData []byte
- // CAData holds PEM-encoded bytes (typically read from a root certificates bundle).
- // CAData takes precedence over CAFile
- CAData []byte
-}
-
-var _ fmt.Stringer = TLSClientConfig{}
-var _ fmt.GoStringer = TLSClientConfig{}
-
-type sanitizedTLSClientConfig TLSClientConfig
-
-// GoString implements fmt.GoStringer and sanitizes sensitive fields of
-// TLSClientConfig to prevent accidental leaking via logs.
-func (c TLSClientConfig) GoString() string {
- return c.String()
-}
-
-// String implements fmt.Stringer and sanitizes sensitive fields of
-// TLSClientConfig to prevent accidental leaking via logs.
-func (c TLSClientConfig) String() string {
- cc := sanitizedTLSClientConfig{
- Insecure: c.Insecure,
- ServerName: c.ServerName,
- CertFile: c.CertFile,
- KeyFile: c.KeyFile,
- CAFile: c.CAFile,
- CertData: c.CertData,
- KeyData: c.KeyData,
- CAData: c.CAData,
- }
- // Explicitly mark non-empty credential fields as redacted.
- if len(cc.CertData) != 0 {
- cc.CertData = []byte("--- TRUNCATED ---")
- }
- if len(cc.KeyData) != 0 {
- cc.KeyData = []byte("--- REDACTED ---")
- }
- return fmt.Sprintf("%#v", cc)
-}
-
-type ContentConfig struct {
- // AcceptContentTypes specifies the types the client will accept and is optional.
- // If not set, ContentType will be used to define the Accept header
- AcceptContentTypes string
- // ContentType specifies the wire format used to communicate with the server.
- // This value will be set as the Accept header on requests made to the server, and
- // as the default content type on any object sent to the server. If not set,
- // "application/json" is used.
- ContentType string
- // GroupVersion is the API version to talk to. Must be provided when initializing
- // a RESTClient directly. When initializing a Client, will be set with the default
- // code version.
- GroupVersion *schema.GroupVersion
- // NegotiatedSerializer is used for obtaining encoders and decoders for multiple
- // supported media types.
- NegotiatedSerializer runtime.NegotiatedSerializer
-}
-
-// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config
-// object. Note that a RESTClient may require fields that are optional when initializing a Client.
-// A RESTClient created by this method is generic - it expects to operate on an API that follows
-// the Kubernetes conventions, but may not be the Kubernetes API.
-func RESTClientFor(config *Config) (*RESTClient, error) {
- if config.GroupVersion == nil {
- return nil, fmt.Errorf("GroupVersion is required when initializing a RESTClient")
- }
- if config.NegotiatedSerializer == nil {
- return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient")
- }
- qps := config.QPS
- if config.QPS == 0.0 {
- qps = DefaultQPS
- }
- burst := config.Burst
- if config.Burst == 0 {
- burst = DefaultBurst
- }
-
- baseURL, versionedAPIPath, err := defaultServerUrlFor(config)
- if err != nil {
- return nil, err
- }
-
- transport, err := TransportFor(config)
- if err != nil {
- return nil, err
- }
-
- var httpClient *http.Client
- if transport != http.DefaultTransport {
- httpClient = &http.Client{Transport: transport}
- if config.Timeout > 0 {
- httpClient.Timeout = config.Timeout
- }
- }
-
- return NewRESTClient(baseURL, versionedAPIPath, config.ContentConfig, qps, burst, config.RateLimiter, httpClient)
-}
-
-// UnversionedRESTClientFor is the same as RESTClientFor, except that it allows
-// the config.Version to be empty.
-func UnversionedRESTClientFor(config *Config) (*RESTClient, error) {
- if config.NegotiatedSerializer == nil {
- return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient")
- }
-
- baseURL, versionedAPIPath, err := defaultServerUrlFor(config)
- if err != nil {
- return nil, err
- }
-
- transport, err := TransportFor(config)
- if err != nil {
- return nil, err
- }
-
- var httpClient *http.Client
- if transport != http.DefaultTransport {
- httpClient = &http.Client{Transport: transport}
- if config.Timeout > 0 {
- httpClient.Timeout = config.Timeout
- }
- }
-
- versionConfig := config.ContentConfig
- if versionConfig.GroupVersion == nil {
- v := metav1.SchemeGroupVersion
- versionConfig.GroupVersion = &v
- }
-
- return NewRESTClient(baseURL, versionedAPIPath, versionConfig, config.QPS, config.Burst, config.RateLimiter, httpClient)
-}
-
-// SetKubernetesDefaults sets default values on the provided client config for accessing the
-// Kubernetes API or returns an error if any of the defaults are impossible or invalid.
-func SetKubernetesDefaults(config *Config) error {
- if len(config.UserAgent) == 0 {
- config.UserAgent = DefaultKubernetesUserAgent()
- }
- return nil
-}
-
-// adjustCommit returns sufficient significant figures of the commit's git hash.
-func adjustCommit(c string) string {
- if len(c) == 0 {
- return "unknown"
- }
- if len(c) > 7 {
- return c[:7]
- }
- return c
-}
-
-// adjustVersion strips "alpha", "beta", etc. from version in form
-// major.minor.patch-[alpha|beta|etc].
-func adjustVersion(v string) string {
- if len(v) == 0 {
- return "unknown"
- }
- seg := strings.SplitN(v, "-", 2)
- return seg[0]
-}
-
-// adjustCommand returns the last component of the
-// OS-specific command path for use in User-Agent.
-func adjustCommand(p string) string {
- // Unlikely, but better than returning "".
- if len(p) == 0 {
- return "unknown"
- }
- return filepath.Base(p)
-}
-
-// buildUserAgent builds a User-Agent string from given args.
-func buildUserAgent(command, version, os, arch, commit string) string {
- return fmt.Sprintf(
- "%s/%s (%s/%s) kubernetes/%s", command, version, os, arch, commit)
-}
-
-// DefaultKubernetesUserAgent returns a User-Agent string built from static global vars.
-func DefaultKubernetesUserAgent() string {
- return buildUserAgent(
- adjustCommand(os.Args[0]),
- adjustVersion(version.Get().GitVersion),
- gruntime.GOOS,
- gruntime.GOARCH,
- adjustCommit(version.Get().GitCommit))
-}
-
-// InClusterConfig returns a config object which uses the service account
-// kubernetes gives to pods. It's intended for clients that expect to be
-// running inside a pod running on kubernetes. It will return ErrNotInCluster
-// if called from a process not running in a kubernetes environment.
-func InClusterConfig() (*Config, error) {
- const (
- tokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"
- rootCAFile = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
- )
- host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT")
- if len(host) == 0 || len(port) == 0 {
- return nil, ErrNotInCluster
- }
-
- token, err := ioutil.ReadFile(tokenFile)
- if err != nil {
- return nil, err
- }
-
- tlsClientConfig := TLSClientConfig{}
-
- if _, err := certutil.NewPool(rootCAFile); err != nil {
- klog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err)
- } else {
- tlsClientConfig.CAFile = rootCAFile
- }
-
- return &Config{
- // TODO: switch to using cluster DNS.
- Host: "https://" + net.JoinHostPort(host, port),
- TLSClientConfig: tlsClientConfig,
- BearerToken: string(token),
- BearerTokenFile: tokenFile,
- }, nil
-}
-
-// IsConfigTransportTLS returns true if and only if the provided
-// config will result in a protected connection to the server when it
-// is passed to restclient.RESTClientFor(). Use to determine when to
-// send credentials over the wire.
-//
-// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are
-// still possible.
-func IsConfigTransportTLS(config Config) bool {
- baseURL, _, err := defaultServerUrlFor(&config)
- if err != nil {
- return false
- }
- return baseURL.Scheme == "https"
-}
-
-// LoadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData,
-// KeyData, and CAData fields, or returns an error. If no error is returned, all three fields are
-// either populated or were empty to start.
-func LoadTLSFiles(c *Config) error {
- var err error
- c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile)
- if err != nil {
- return err
- }
-
- c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile)
- if err != nil {
- return err
- }
-
- c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile)
- if err != nil {
- return err
- }
- return nil
-}
-
-// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file,
-// or an error if an error occurred reading the file
-func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
- if len(data) > 0 {
- return data, nil
- }
- if len(file) > 0 {
- fileData, err := ioutil.ReadFile(file)
- if err != nil {
- return []byte{}, err
- }
- return fileData, nil
- }
- return nil, nil
-}
-
-func AddUserAgent(config *Config, userAgent string) *Config {
- fullUserAgent := DefaultKubernetesUserAgent() + "/" + userAgent
- config.UserAgent = fullUserAgent
- return config
-}
-
-// AnonymousClientConfig returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) and custom transports (WrapTransport, Transport) removed
-func AnonymousClientConfig(config *Config) *Config {
- // copy only known safe fields
- return &Config{
- Host: config.Host,
- APIPath: config.APIPath,
- ContentConfig: config.ContentConfig,
- TLSClientConfig: TLSClientConfig{
- Insecure: config.Insecure,
- ServerName: config.ServerName,
- CAFile: config.TLSClientConfig.CAFile,
- CAData: config.TLSClientConfig.CAData,
- },
- RateLimiter: config.RateLimiter,
- UserAgent: config.UserAgent,
- QPS: config.QPS,
- Burst: config.Burst,
- Timeout: config.Timeout,
- Dial: config.Dial,
- }
-}
-
-// CopyConfig returns a copy of the given config
-func CopyConfig(config *Config) *Config {
- return &Config{
- Host: config.Host,
- APIPath: config.APIPath,
- ContentConfig: config.ContentConfig,
- Username: config.Username,
- Password: config.Password,
- BearerToken: config.BearerToken,
- BearerTokenFile: config.BearerTokenFile,
- Impersonate: ImpersonationConfig{
- Groups: config.Impersonate.Groups,
- Extra: config.Impersonate.Extra,
- UserName: config.Impersonate.UserName,
- },
- AuthProvider: config.AuthProvider,
- AuthConfigPersister: config.AuthConfigPersister,
- ExecProvider: config.ExecProvider,
- TLSClientConfig: TLSClientConfig{
- Insecure: config.TLSClientConfig.Insecure,
- ServerName: config.TLSClientConfig.ServerName,
- CertFile: config.TLSClientConfig.CertFile,
- KeyFile: config.TLSClientConfig.KeyFile,
- CAFile: config.TLSClientConfig.CAFile,
- CertData: config.TLSClientConfig.CertData,
- KeyData: config.TLSClientConfig.KeyData,
- CAData: config.TLSClientConfig.CAData,
- },
- UserAgent: config.UserAgent,
- Transport: config.Transport,
- WrapTransport: config.WrapTransport,
- QPS: config.QPS,
- Burst: config.Burst,
- RateLimiter: config.RateLimiter,
- Timeout: config.Timeout,
- Dial: config.Dial,
- }
-}
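
The hunk above drops client-go's rest.Config helpers from vendor/. For reference, a minimal sketch of how the removed InClusterConfig and AnonymousClientConfig entry points are typically consumed; the import path, function names, and fields come from the deleted file, while the surrounding program is illustrative only:

package main

import (
	"fmt"

	"k8s.io/client-go/rest"
)

func main() {
	// Inside a pod, InClusterConfig reads the service-account token and CA
	// from /var/run/secrets/kubernetes.io/serviceaccount.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		// ErrNotInCluster is returned when KUBERNETES_SERVICE_HOST/PORT are unset.
		fmt.Println("not running in a cluster:", err)
		return
	}

	// AnonymousClientConfig strips credentials but keeps host and CA settings,
	// which is useful for unauthenticated probes of the same apiserver.
	anon := rest.AnonymousClientConfig(cfg)
	fmt.Println(cfg.Host, anon.BearerToken == "")
}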
diff --git a/vendor/k8s.io/client-go/rest/plugin.go b/vendor/k8s.io/client-go/rest/plugin.go
deleted file mode 100644
index 83ef5ae32..000000000
--- a/vendor/k8s.io/client-go/rest/plugin.go
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package rest
-
-import (
- "fmt"
- "net/http"
- "sync"
-
- "k8s.io/klog"
-
- clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
-)
-
-type AuthProvider interface {
- // WrapTransport allows the plugin to create a modified RoundTripper that
- // attaches authorization headers (or other info) to requests.
- WrapTransport(http.RoundTripper) http.RoundTripper
- // Login allows the plugin to initialize its configuration. It must not
- // require direct user interaction.
- Login() error
-}
-
-// Factory generates an AuthProvider plugin.
-// clusterAddress is the address of the current cluster.
-// config is the initial configuration for this plugin.
-// persister allows the plugin to save updated configuration.
-type Factory func(clusterAddress string, config map[string]string, persister AuthProviderConfigPersister) (AuthProvider, error)
-
-// AuthProviderConfigPersister allows a plugin to persist configuration info
-// for just itself.
-type AuthProviderConfigPersister interface {
- Persist(map[string]string) error
-}
-
-// All registered auth provider plugins.
-var pluginsLock sync.Mutex
-var plugins = make(map[string]Factory)
-
-func RegisterAuthProviderPlugin(name string, plugin Factory) error {
- pluginsLock.Lock()
- defer pluginsLock.Unlock()
- if _, found := plugins[name]; found {
- return fmt.Errorf("Auth Provider Plugin %q was registered twice", name)
- }
- klog.V(4).Infof("Registered Auth Provider Plugin %q", name)
- plugins[name] = plugin
- return nil
-}
-
-func GetAuthProvider(clusterAddress string, apc *clientcmdapi.AuthProviderConfig, persister AuthProviderConfigPersister) (AuthProvider, error) {
- pluginsLock.Lock()
- defer pluginsLock.Unlock()
- p, ok := plugins[apc.Name]
- if !ok {
- return nil, fmt.Errorf("No Auth Provider found for name %q", apc.Name)
- }
- return p(clusterAddress, apc.Config, persister)
-}
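
plugin.go defined the auth-provider plugin registry. A hedged sketch of how a plugin registers against the removed API; the RegisterAuthProviderPlugin, Factory, and AuthProvider signatures come from the deleted file, while the "noop" provider and its name are hypothetical:

package main

import (
	"net/http"

	"k8s.io/client-go/rest"
)

// noopProvider satisfies the AuthProvider interface from the removed plugin.go.
type noopProvider struct{}

func (noopProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper { return rt }
func (noopProvider) Login() error                                         { return nil }

func init() {
	// The factory signature matches rest.Factory: it receives the cluster
	// address, the provider's config map, and a persister for updated config.
	_ = rest.RegisterAuthProviderPlugin("noop", func(clusterAddress string, config map[string]string, persister rest.AuthProviderConfigPersister) (rest.AuthProvider, error) {
		return noopProvider{}, nil
	})
}

func main() {}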
diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go
deleted file mode 100644
index 0570615fc..000000000
--- a/vendor/k8s.io/client-go/rest/request.go
+++ /dev/null
@@ -1,1206 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package rest
-
-import (
- "bytes"
- "context"
- "encoding/hex"
- "fmt"
- "io"
- "io/ioutil"
- "mime"
- "net/http"
- "net/url"
- "path"
- "reflect"
- "strconv"
- "strings"
- "time"
-
- "golang.org/x/net/http2"
- "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/runtime/serializer/streaming"
- "k8s.io/apimachinery/pkg/util/net"
- "k8s.io/apimachinery/pkg/watch"
- restclientwatch "k8s.io/client-go/rest/watch"
- "k8s.io/client-go/tools/metrics"
- "k8s.io/client-go/util/flowcontrol"
- "k8s.io/klog"
-)
-
-var (
- // longThrottleLatency defines the threshold for logging requests. All requests being
- // throttled for more than longThrottleLatency will be logged.
- longThrottleLatency = 50 * time.Millisecond
-)
-
-// HTTPClient is an interface for testing a request object.
-type HTTPClient interface {
- Do(req *http.Request) (*http.Response, error)
-}
-
-// ResponseWrapper is an interface for getting a response.
-// The response may be accessed either as raw data (the whole output is put into memory) or as a stream.
-type ResponseWrapper interface {
- DoRaw() ([]byte, error)
- Stream() (io.ReadCloser, error)
-}
-
-// RequestConstructionError is returned when there's an error assembling a request.
-type RequestConstructionError struct {
- Err error
-}
-
-// Error returns a textual description of 'r'.
-func (r *RequestConstructionError) Error() string {
- return fmt.Sprintf("request construction error: '%v'", r.Err)
-}
-
-// Request allows for building up a request to a server in a chained fashion.
-// Any errors are stored until the end of your call, so you only have to
-// check once.
-type Request struct {
- // required
- client HTTPClient
- verb string
-
- baseURL *url.URL
- content ContentConfig
- serializers Serializers
-
- // generic components accessible via method setters
- pathPrefix string
- subpath string
- params url.Values
- headers http.Header
-
- // structural elements of the request that are part of the Kubernetes API conventions
- namespace string
- namespaceSet bool
- resource string
- resourceName string
- subresource string
- timeout time.Duration
-
- // output
- err error
- body io.Reader
-
- // This is only used for per-request timeouts, deadlines, and cancellations.
- ctx context.Context
-
- backoffMgr BackoffManager
- throttle flowcontrol.RateLimiter
-}
-
-// NewRequest creates a new request helper object for accessing runtime.Objects on a server.
-func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter, timeout time.Duration) *Request {
- if backoff == nil {
- klog.V(2).Infof("Not implementing request backoff strategy.")
- backoff = &NoBackoff{}
- }
-
- pathPrefix := "/"
- if baseURL != nil {
- pathPrefix = path.Join(pathPrefix, baseURL.Path)
- }
- r := &Request{
- client: client,
- verb: verb,
- baseURL: baseURL,
- pathPrefix: path.Join(pathPrefix, versionedAPIPath),
- content: content,
- serializers: serializers,
- backoffMgr: backoff,
- throttle: throttle,
- timeout: timeout,
- }
- switch {
- case len(content.AcceptContentTypes) > 0:
- r.SetHeader("Accept", content.AcceptContentTypes)
- case len(content.ContentType) > 0:
- r.SetHeader("Accept", content.ContentType+", */*")
- }
- return r
-}
-
-// Prefix adds segments to the relative beginning of the request path. These
-// items will be placed before the optional Namespace, Resource, or Name sections.
-// Setting AbsPath will clear any previously set Prefix segments
-func (r *Request) Prefix(segments ...string) *Request {
- if r.err != nil {
- return r
- }
- r.pathPrefix = path.Join(r.pathPrefix, path.Join(segments...))
- return r
-}
-
-// Suffix appends segments to the end of the path. These items will be placed after the prefix and optional
-// Namespace, Resource, or Name sections.
-func (r *Request) Suffix(segments ...string) *Request {
- if r.err != nil {
- return r
- }
- r.subpath = path.Join(r.subpath, path.Join(segments...))
- return r
-}
-
-// Resource sets the resource to access (<resource>/[ns/<namespace>/]<name>)
-func (r *Request) Resource(resource string) *Request {
- if r.err != nil {
- return r
- }
- if len(r.resource) != 0 {
- r.err = fmt.Errorf("resource already set to %q, cannot change to %q", r.resource, resource)
- return r
- }
- if msgs := IsValidPathSegmentName(resource); len(msgs) != 0 {
- r.err = fmt.Errorf("invalid resource %q: %v", resource, msgs)
- return r
- }
- r.resource = resource
- return r
-}
-
-// BackOff sets the request's backoff manager to the one specified,
-// or defaults to the stub implementation if nil is provided
-func (r *Request) BackOff(manager BackoffManager) *Request {
- if manager == nil {
- r.backoffMgr = &NoBackoff{}
- return r
- }
-
- r.backoffMgr = manager
- return r
-}
-
-// Throttle receives a rate-limiter and sets or replaces an existing request limiter
-func (r *Request) Throttle(limiter flowcontrol.RateLimiter) *Request {
- r.throttle = limiter
- return r
-}
-
-// SubResource sets a sub-resource path which can be multiple segments after the resource
-// name but before the suffix.
-func (r *Request) SubResource(subresources ...string) *Request {
- if r.err != nil {
- return r
- }
- subresource := path.Join(subresources...)
- if len(r.subresource) != 0 {
- r.err = fmt.Errorf("subresource already set to %q, cannot change to %q", r.resource, subresource)
- return r
- }
- for _, s := range subresources {
- if msgs := IsValidPathSegmentName(s); len(msgs) != 0 {
- r.err = fmt.Errorf("invalid subresource %q: %v", s, msgs)
- return r
- }
- }
- r.subresource = subresource
- return r
-}
-
-// Name sets the name of a resource to access (<resource>/[ns/<namespace>/]<name>)
-func (r *Request) Name(resourceName string) *Request {
- if r.err != nil {
- return r
- }
- if len(resourceName) == 0 {
- r.err = fmt.Errorf("resource name may not be empty")
- return r
- }
- if len(r.resourceName) != 0 {
- r.err = fmt.Errorf("resource name already set to %q, cannot change to %q", r.resourceName, resourceName)
- return r
- }
- if msgs := IsValidPathSegmentName(resourceName); len(msgs) != 0 {
- r.err = fmt.Errorf("invalid resource name %q: %v", resourceName, msgs)
- return r
- }
- r.resourceName = resourceName
- return r
-}
-
-// Namespace applies the namespace scope to a request (<resource>/[ns/<namespace>/]<name>)
-func (r *Request) Namespace(namespace string) *Request {
- if r.err != nil {
- return r
- }
- if r.namespaceSet {
- r.err = fmt.Errorf("namespace already set to %q, cannot change to %q", r.namespace, namespace)
- return r
- }
- if msgs := IsValidPathSegmentName(namespace); len(msgs) != 0 {
- r.err = fmt.Errorf("invalid namespace %q: %v", namespace, msgs)
- return r
- }
- r.namespaceSet = true
- r.namespace = namespace
- return r
-}
-
-// NamespaceIfScoped is a convenience function to set a namespace if scoped is true
-func (r *Request) NamespaceIfScoped(namespace string, scoped bool) *Request {
- if scoped {
- return r.Namespace(namespace)
- }
- return r
-}
-
-// AbsPath overwrites an existing path with the segments provided. Trailing slashes are preserved
-// when a single segment is passed.
-func (r *Request) AbsPath(segments ...string) *Request {
- if r.err != nil {
- return r
- }
- r.pathPrefix = path.Join(r.baseURL.Path, path.Join(segments...))
- if len(segments) == 1 && (len(r.baseURL.Path) > 1 || len(segments[0]) > 1) && strings.HasSuffix(segments[0], "/") {
- // preserve any trailing slashes for legacy behavior
- r.pathPrefix += "/"
- }
- return r
-}
-
-// RequestURI overwrites existing path and parameters with the value of the provided server relative
-// URI.
-func (r *Request) RequestURI(uri string) *Request {
- if r.err != nil {
- return r
- }
- locator, err := url.Parse(uri)
- if err != nil {
- r.err = err
- return r
- }
- r.pathPrefix = locator.Path
- if len(locator.Query()) > 0 {
- if r.params == nil {
- r.params = make(url.Values)
- }
- for k, v := range locator.Query() {
- r.params[k] = v
- }
- }
- return r
-}
-
-// Param creates a query parameter with the given string value.
-func (r *Request) Param(paramName, s string) *Request {
- if r.err != nil {
- return r
- }
- return r.setParam(paramName, s)
-}
-
-// VersionedParams will take the provided object, serialize it to a map[string][]string using the
-// implicit RESTClient API version and the default parameter codec, and then add those as parameters
-// to the request. Use this to provide versioned query parameters from client libraries.
-// VersionedParams will not write query parameters that have omitempty set and are empty. If a
-// parameter has already been set it is appended to (Params and VersionedParams are additive).
-func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCodec) *Request {
- return r.SpecificallyVersionedParams(obj, codec, *r.content.GroupVersion)
-}
-
-func (r *Request) SpecificallyVersionedParams(obj runtime.Object, codec runtime.ParameterCodec, version schema.GroupVersion) *Request {
- if r.err != nil {
- return r
- }
- params, err := codec.EncodeParameters(obj, version)
- if err != nil {
- r.err = err
- return r
- }
- for k, v := range params {
- if r.params == nil {
- r.params = make(url.Values)
- }
- r.params[k] = append(r.params[k], v...)
- }
- return r
-}
-
-func (r *Request) setParam(paramName, value string) *Request {
- if r.params == nil {
- r.params = make(url.Values)
- }
- r.params[paramName] = append(r.params[paramName], value)
- return r
-}
-
-func (r *Request) SetHeader(key string, values ...string) *Request {
- if r.headers == nil {
- r.headers = http.Header{}
- }
- r.headers.Del(key)
- for _, value := range values {
- r.headers.Add(key, value)
- }
- return r
-}
-
-// Timeout makes the request use the given duration as an overall timeout for the
-// request. Additionally, if set passes the value as "timeout" parameter in URL.
-func (r *Request) Timeout(d time.Duration) *Request {
- if r.err != nil {
- return r
- }
- r.timeout = d
- return r
-}
-
-// Body makes the request use obj as the body. Optional.
-// If obj is a string, try to read a file of that name.
-// If obj is a []byte, send it directly.
-// If obj is an io.Reader, use it directly.
-// If obj is a runtime.Object, marshal it correctly, and set Content-Type header.
-// If obj is a runtime.Object and nil, do nothing.
-// Otherwise, set an error.
-func (r *Request) Body(obj interface{}) *Request {
- if r.err != nil {
- return r
- }
- switch t := obj.(type) {
- case string:
- data, err := ioutil.ReadFile(t)
- if err != nil {
- r.err = err
- return r
- }
- glogBody("Request Body", data)
- r.body = bytes.NewReader(data)
- case []byte:
- glogBody("Request Body", t)
- r.body = bytes.NewReader(t)
- case io.Reader:
- r.body = t
- case runtime.Object:
- // callers may pass typed interface pointers, therefore we must check nil with reflection
- if reflect.ValueOf(t).IsNil() {
- return r
- }
- data, err := runtime.Encode(r.serializers.Encoder, t)
- if err != nil {
- r.err = err
- return r
- }
- glogBody("Request Body", data)
- r.body = bytes.NewReader(data)
- r.SetHeader("Content-Type", r.content.ContentType)
- default:
- r.err = fmt.Errorf("unknown type used for body: %+v", obj)
- }
- return r
-}
-
-// Context adds a context to the request. Contexts are only used for
-// timeouts, deadlines, and cancellations.
-func (r *Request) Context(ctx context.Context) *Request {
- r.ctx = ctx
- return r
-}
-
-// URL returns the current working URL.
-func (r *Request) URL() *url.URL {
- p := r.pathPrefix
- if r.namespaceSet && len(r.namespace) > 0 {
- p = path.Join(p, "namespaces", r.namespace)
- }
- if len(r.resource) != 0 {
- p = path.Join(p, strings.ToLower(r.resource))
- }
- // Join trims trailing slashes, so preserve r.pathPrefix's trailing slash for backwards compatibility if nothing was changed
- if len(r.resourceName) != 0 || len(r.subpath) != 0 || len(r.subresource) != 0 {
- p = path.Join(p, r.resourceName, r.subresource, r.subpath)
- }
-
- finalURL := &url.URL{}
- if r.baseURL != nil {
- *finalURL = *r.baseURL
- }
- finalURL.Path = p
-
- query := url.Values{}
- for key, values := range r.params {
- for _, value := range values {
- query.Add(key, value)
- }
- }
-
- // timeout is handled specially here.
- if r.timeout != 0 {
- query.Set("timeout", r.timeout.String())
- }
- finalURL.RawQuery = query.Encode()
- return finalURL
-}
-
-// finalURLTemplate is similar to URL(), but will make all specific parameter values equal
-// - instead of name or namespace, "{name}" and "{namespace}" will be used, and all query
-// parameters will be reset. This creates a copy of the url so as not to change the
-// underlying object.
-func (r Request) finalURLTemplate() url.URL {
- newParams := url.Values{}
- v := []string{"{value}"}
- for k := range r.params {
- newParams[k] = v
- }
- r.params = newParams
- url := r.URL()
- segments := strings.Split(r.URL().Path, "/")
- groupIndex := 0
- index := 0
- if r.URL() != nil && r.baseURL != nil && strings.Contains(r.URL().Path, r.baseURL.Path) {
- groupIndex += len(strings.Split(r.baseURL.Path, "/"))
- }
- if groupIndex >= len(segments) {
- return *url
- }
-
- const CoreGroupPrefix = "api"
- const NamedGroupPrefix = "apis"
- isCoreGroup := segments[groupIndex] == CoreGroupPrefix
- isNamedGroup := segments[groupIndex] == NamedGroupPrefix
- if isCoreGroup {
- // checking the case of core group with /api/v1/... format
- index = groupIndex + 2
- } else if isNamedGroup {
- // checking the case of named group with /apis/apps/v1/... format
- index = groupIndex + 3
- } else {
- // this should not happen, since the only two possibilities are /api... and /apis...; this is just an
- // outlet in case more API group prefixes are ever added:
- // https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-groups
- // if an unrecognized API group name is encountered, return {prefix} for url.Path
- url.Path = "/{prefix}"
- url.RawQuery = ""
- return *url
- }
- //switch segLength := len(segments) - index; segLength {
- switch {
- // case len(segments) - index == 1:
- // resource (with no name) do nothing
- case len(segments)-index == 2:
- // /$RESOURCE/$NAME: replace $NAME with {name}
- segments[index+1] = "{name}"
- case len(segments)-index == 3:
- if segments[index+2] == "finalize" || segments[index+2] == "status" {
- // /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name}
- segments[index+1] = "{name}"
- } else {
- // /namespace/$NAMESPACE/$RESOURCE: replace $NAMESPACE with {namespace}
- segments[index+1] = "{namespace}"
- }
- case len(segments)-index >= 4:
- segments[index+1] = "{namespace}"
- // /namespace/$NAMESPACE/$RESOURCE/$NAME: replace $NAMESPACE with {namespace}, $NAME with {name}
- if segments[index+3] != "finalize" && segments[index+3] != "status" {
- // /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name}
- segments[index+3] = "{name}"
- }
- }
- url.Path = path.Join(segments...)
- return *url
-}
-
-func (r *Request) tryThrottle() {
- now := time.Now()
- if r.throttle != nil {
- r.throttle.Accept()
- }
- if latency := time.Since(now); latency > longThrottleLatency {
- klog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String())
- }
-}
-
-// Watch attempts to begin watching the requested location.
-// Returns a watch.Interface, or an error.
-func (r *Request) Watch() (watch.Interface, error) {
- return r.WatchWithSpecificDecoders(
- func(body io.ReadCloser) streaming.Decoder {
- framer := r.serializers.Framer.NewFrameReader(body)
- return streaming.NewDecoder(framer, r.serializers.StreamingSerializer)
- },
- r.serializers.Decoder,
- )
-}
-
-// WatchWithSpecificDecoders attempts to begin watching the requested location with a *different* decoder.
-// Turns out that you want one "standard" decoder for the watch event and one "personal" decoder for the content
-// Returns a watch.Interface, or an error.
-func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser) streaming.Decoder, embeddedDecoder runtime.Decoder) (watch.Interface, error) {
- // We specifically don't want to rate limit watches, so we
- // don't use r.throttle here.
- if r.err != nil {
- return nil, r.err
- }
- if r.serializers.Framer == nil {
- return nil, fmt.Errorf("watching resources is not possible with this client (content-type: %s)", r.content.ContentType)
- }
-
- url := r.URL().String()
- req, err := http.NewRequest(r.verb, url, r.body)
- if err != nil {
- return nil, err
- }
- if r.ctx != nil {
- req = req.WithContext(r.ctx)
- }
- req.Header = r.headers
- client := r.client
- if client == nil {
- client = http.DefaultClient
- }
- r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL()))
- resp, err := client.Do(req)
- updateURLMetrics(r, resp, err)
- if r.baseURL != nil {
- if err != nil {
- r.backoffMgr.UpdateBackoff(r.baseURL, err, 0)
- } else {
- r.backoffMgr.UpdateBackoff(r.baseURL, err, resp.StatusCode)
- }
- }
- if err != nil {
- // The watch stream mechanism handles many common partial data errors, so closed
- // connections can be retried in many cases.
- if net.IsProbableEOF(err) {
- return watch.NewEmptyWatch(), nil
- }
- return nil, err
- }
- if resp.StatusCode != http.StatusOK {
- defer resp.Body.Close()
- if result := r.transformResponse(resp, req); result.err != nil {
- return nil, result.err
- }
- return nil, fmt.Errorf("for request %s, got status: %v", url, resp.StatusCode)
- }
- wrapperDecoder := wrapperDecoderFn(resp.Body)
- return watch.NewStreamWatcher(
- restclientwatch.NewDecoder(wrapperDecoder, embeddedDecoder),
- // use 500 to indicate that the cause of the error is unknown - other error codes
- // are more specific to HTTP interactions, and set a reason
- errors.NewClientErrorReporter(http.StatusInternalServerError, r.verb, "ClientWatchDecoding"),
- ), nil
-}
-
-// updateURLMetrics is a convenience function for pushing metrics.
-// It also handles corner cases for incomplete/invalid request data.
-func updateURLMetrics(req *Request, resp *http.Response, err error) {
- url := "none"
- if req.baseURL != nil {
- url = req.baseURL.Host
- }
-
- // Errors can be arbitrary strings. Unbound label cardinality is not suitable for a metric
- // system so we just report them as `<error>`.
- if err != nil {
- metrics.RequestResult.Increment("<error>", req.verb, url)
- } else {
- //Metrics for failure codes
- metrics.RequestResult.Increment(strconv.Itoa(resp.StatusCode), req.verb, url)
- }
-}
-
-// Stream formats and executes the request, and offers streaming of the response.
-// Returns io.ReadCloser which could be used for streaming of the response, or an error
-// Any non-2xx http status code causes an error. If we get a non-2xx code, we try to convert the body into an APIStatus object.
-// If we can, we return that as an error. Otherwise, we create an error that lists the http status and the content of the response.
-func (r *Request) Stream() (io.ReadCloser, error) {
- if r.err != nil {
- return nil, r.err
- }
-
- r.tryThrottle()
-
- url := r.URL().String()
- req, err := http.NewRequest(r.verb, url, nil)
- if err != nil {
- return nil, err
- }
- if r.ctx != nil {
- req = req.WithContext(r.ctx)
- }
- req.Header = r.headers
- client := r.client
- if client == nil {
- client = http.DefaultClient
- }
- r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL()))
- resp, err := client.Do(req)
- updateURLMetrics(r, resp, err)
- if r.baseURL != nil {
- if err != nil {
- r.backoffMgr.UpdateBackoff(r.URL(), err, 0)
- } else {
- r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode)
- }
- }
- if err != nil {
- return nil, err
- }
-
- switch {
- case (resp.StatusCode >= 200) && (resp.StatusCode < 300):
- return resp.Body, nil
-
- default:
- // ensure we close the body before returning the error
- defer resp.Body.Close()
-
- result := r.transformResponse(resp, req)
- err := result.Error()
- if err == nil {
- err = fmt.Errorf("%d while accessing %v: %s", result.statusCode, url, string(result.body))
- }
- return nil, err
- }
-}
-
-// request connects to the server and invokes the provided function when a server response is
-// received. It handles retry behavior and up front validation of requests. It will invoke
-// fn at most once. It will return an error if a problem occurred prior to connecting to the
-// server - the provided function is responsible for handling server errors.
-func (r *Request) request(fn func(*http.Request, *http.Response)) error {
- //Metrics for total request latency
- start := time.Now()
- defer func() {
- metrics.RequestLatency.Observe(r.verb, r.finalURLTemplate(), time.Since(start))
- }()
-
- if r.err != nil {
- klog.V(4).Infof("Error in request: %v", r.err)
- return r.err
- }
-
- // TODO: added to catch programmer errors (invoking operations with an object with an empty namespace)
- if (r.verb == "GET" || r.verb == "PUT" || r.verb == "DELETE") && r.namespaceSet && len(r.resourceName) > 0 && len(r.namespace) == 0 {
- return fmt.Errorf("an empty namespace may not be set when a resource name is provided")
- }
- if (r.verb == "POST") && r.namespaceSet && len(r.namespace) == 0 {
- return fmt.Errorf("an empty namespace may not be set during creation")
- }
-
- client := r.client
- if client == nil {
- client = http.DefaultClient
- }
-
- // Right now we make about ten retry attempts if we get a Retry-After response.
- maxRetries := 10
- retries := 0
- for {
- url := r.URL().String()
- req, err := http.NewRequest(r.verb, url, r.body)
- if err != nil {
- return err
- }
- if r.timeout > 0 {
- if r.ctx == nil {
- r.ctx = context.Background()
- }
- var cancelFn context.CancelFunc
- r.ctx, cancelFn = context.WithTimeout(r.ctx, r.timeout)
- defer cancelFn()
- }
- if r.ctx != nil {
- req = req.WithContext(r.ctx)
- }
- req.Header = r.headers
-
- r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL()))
- if retries > 0 {
- // We are retrying the request that we already sent to the apiserver
- // at least once before.
- // This request should also be throttled with the client-internal throttler.
- r.tryThrottle()
- }
- resp, err := client.Do(req)
- updateURLMetrics(r, resp, err)
- if err != nil {
- r.backoffMgr.UpdateBackoff(r.URL(), err, 0)
- } else {
- r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode)
- }
- if err != nil {
- // "Connection reset by peer" is usually a transient error.
- // Thus in case of "GET" operations, we simply retry it.
- // We are not automatically retrying "write" operations, as
- // they are not idempotent.
- if !net.IsConnectionReset(err) || r.verb != "GET" {
- return err
- }
- // For the purpose of retry, we set the artificial "retry-after" response.
- // TODO: Should we clean the original response if it exists?
- resp = &http.Response{
- StatusCode: http.StatusInternalServerError,
- Header: http.Header{"Retry-After": []string{"1"}},
- Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
- }
- }
-
- done := func() bool {
- // Ensure the response body is fully read and closed
- // before we reconnect, so that we reuse the same TCP
- // connection.
- defer func() {
- const maxBodySlurpSize = 2 << 10
- if resp.ContentLength <= maxBodySlurpSize {
- io.Copy(ioutil.Discard, &io.LimitedReader{R: resp.Body, N: maxBodySlurpSize})
- }
- resp.Body.Close()
- }()
-
- retries++
- if seconds, wait := checkWait(resp); wait && retries < maxRetries {
- if seeker, ok := r.body.(io.Seeker); ok && r.body != nil {
- _, err := seeker.Seek(0, 0)
- if err != nil {
- klog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body)
- fn(req, resp)
- return true
- }
- }
-
- klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url)
- r.backoffMgr.Sleep(time.Duration(seconds) * time.Second)
- return false
- }
- fn(req, resp)
- return true
- }()
- if done {
- return nil
- }
- }
-}
-
-// Do formats and executes the request. Returns a Result object for easy response
-// processing.
-//
-// Error type:
-// * If the request can't be constructed, or an error happened earlier while building its
-// arguments: *RequestConstructionError
-// * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError
-// * http.Client.Do errors are returned directly.
-func (r *Request) Do() Result {
- r.tryThrottle()
-
- var result Result
- err := r.request(func(req *http.Request, resp *http.Response) {
- result = r.transformResponse(resp, req)
- })
- if err != nil {
- return Result{err: err}
- }
- return result
-}
-
-// DoRaw executes the request but does not process the response body.
-func (r *Request) DoRaw() ([]byte, error) {
- r.tryThrottle()
-
- var result Result
- err := r.request(func(req *http.Request, resp *http.Response) {
- result.body, result.err = ioutil.ReadAll(resp.Body)
- glogBody("Response Body", result.body)
- if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent {
- result.err = r.transformUnstructuredResponseError(resp, req, result.body)
- }
- })
- if err != nil {
- return nil, err
- }
- return result.body, result.err
-}
-
-// transformResponse converts an API response into a structured API object
-func (r *Request) transformResponse(resp *http.Response, req *http.Request) Result {
- var body []byte
- if resp.Body != nil {
- data, err := ioutil.ReadAll(resp.Body)
- switch err.(type) {
- case nil:
- body = data
- case http2.StreamError:
- // This is trying to catch the scenario that the server may close the connection when sending the
- // response body. This can be caused by server timeout due to a slow network connection.
- // TODO: Add test for this. Steps may be:
- // 1. client-go (or kubectl) sends a GET request.
- // 2. Apiserver sends back the headers and then part of the body
- // 3. Apiserver closes connection.
- // 4. client-go should catch this and return an error.
- klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err)
- streamErr := fmt.Errorf("Stream error when reading response body, may be caused by closed connection. Please retry. Original error: %v", err)
- return Result{
- err: streamErr,
- }
- default:
- klog.Errorf("Unexpected error when reading response body: %v", err)
- unexpectedErr := fmt.Errorf("Unexpected error when reading response body. Please retry. Original error: %v", err)
- return Result{
- err: unexpectedErr,
- }
- }
- }
-
- glogBody("Response Body", body)
-
- // verify the content type is accurate
- contentType := resp.Header.Get("Content-Type")
- decoder := r.serializers.Decoder
- if len(contentType) > 0 && (decoder == nil || (len(r.content.ContentType) > 0 && contentType != r.content.ContentType)) {
- mediaType, params, err := mime.ParseMediaType(contentType)
- if err != nil {
- return Result{err: errors.NewInternalError(err)}
- }
- decoder, err = r.serializers.RenegotiatedDecoder(mediaType, params)
- if err != nil {
- // if we fail to negotiate a decoder, treat this as an unstructured error
- switch {
- case resp.StatusCode == http.StatusSwitchingProtocols:
- // no-op, we've been upgraded
- case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent:
- return Result{err: r.transformUnstructuredResponseError(resp, req, body)}
- }
- return Result{
- body: body,
- contentType: contentType,
- statusCode: resp.StatusCode,
- }
- }
- }
-
- switch {
- case resp.StatusCode == http.StatusSwitchingProtocols:
- // no-op, we've been upgraded
- case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent:
- // calculate an unstructured error from the response which the Result object may use if the caller
- // did not return a structured error.
- retryAfter, _ := retryAfterSeconds(resp)
- err := r.newUnstructuredResponseError(body, isTextResponse(resp), resp.StatusCode, req.Method, retryAfter)
- return Result{
- body: body,
- contentType: contentType,
- statusCode: resp.StatusCode,
- decoder: decoder,
- err: err,
- }
- }
-
- return Result{
- body: body,
- contentType: contentType,
- statusCode: resp.StatusCode,
- decoder: decoder,
- }
-}
-
-// truncateBody decides if the body should be truncated, based on the klog verbosity.
-func truncateBody(body string) string {
- max := 0
- switch {
- case bool(klog.V(10)):
- return body
- case bool(klog.V(9)):
- max = 10240
- case bool(klog.V(8)):
- max = 1024
- }
-
- if len(body) <= max {
- return body
- }
-
- return body[:max] + fmt.Sprintf(" [truncated %d chars]", len(body)-max)
-}
-
-// glogBody logs a body output that could be either JSON or protobuf. It explicitly guards against
-// allocating a new string for the body output unless necessary. Uses a simple heuristic to determine
-// whether the body is printable.
-func glogBody(prefix string, body []byte) {
- if klog.V(8) {
- if bytes.IndexFunc(body, func(r rune) bool {
- return r < 0x0a
- }) != -1 {
- klog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body)))
- } else {
- klog.Infof("%s: %s", prefix, truncateBody(string(body)))
- }
- }
-}
-
-// maxUnstructuredResponseTextBytes is an upper bound on how much output to include in the unstructured error.
-const maxUnstructuredResponseTextBytes = 2048
-
-// transformUnstructuredResponseError handles an error from the server that is not in a structured form.
-// It is expected to transform any response that is not recognizable as a clear server sent error from the
-// K8S API using the information provided with the request. In practice, HTTP proxies and client libraries
-// introduce a level of uncertainty to the responses returned by servers that in common use result in
-// unexpected responses. The rough structure is:
-//
-// 1. Assume the server sends you something sane - JSON + well defined error objects + proper codes
-// - this is the happy path
-// - when you get this output, trust what the server sends
-// 2. Guard against empty fields / bodies in received JSON and attempt to cull sufficient info from them to
-// generate a reasonable facsimile of the original failure.
-// - Be sure to use a distinct error type or flag that allows a client to distinguish between this and error 1 above
-// 3. Handle true disconnect failures / completely malformed data by moving up to a more generic client error
-// 4. Distinguish between various connection failures like SSL certificates, timeouts, proxy errors, unexpected
-// initial contact, the presence of mismatched body contents from posted content types
-// - Give these a separate distinct error type and capture as much as possible of the original message
-//
-// TODO: introduce transformation of generic http.Client.Do() errors that separates 4.
-func (r *Request) transformUnstructuredResponseError(resp *http.Response, req *http.Request, body []byte) error {
- if body == nil && resp.Body != nil {
- if data, err := ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: maxUnstructuredResponseTextBytes}); err == nil {
- body = data
- }
- }
- retryAfter, _ := retryAfterSeconds(resp)
- return r.newUnstructuredResponseError(body, isTextResponse(resp), resp.StatusCode, req.Method, retryAfter)
-}
-
-// newUnstructuredResponseError instantiates the appropriate generic error for the provided input. It also logs the body.
-func (r *Request) newUnstructuredResponseError(body []byte, isTextResponse bool, statusCode int, method string, retryAfter int) error {
- // cap the amount of output we create
- if len(body) > maxUnstructuredResponseTextBytes {
- body = body[:maxUnstructuredResponseTextBytes]
- }
-
- message := "unknown"
- if isTextResponse {
- message = strings.TrimSpace(string(body))
- }
- var groupResource schema.GroupResource
- if len(r.resource) > 0 {
- groupResource.Group = r.content.GroupVersion.Group
- groupResource.Resource = r.resource
- }
- return errors.NewGenericServerResponse(
- statusCode,
- method,
- groupResource,
- r.resourceName,
- message,
- retryAfter,
- true,
- )
-}
-
-// isTextResponse returns true if the response appears to be a textual media type.
-func isTextResponse(resp *http.Response) bool {
- contentType := resp.Header.Get("Content-Type")
- if len(contentType) == 0 {
- return true
- }
- media, _, err := mime.ParseMediaType(contentType)
- if err != nil {
- return false
- }
- return strings.HasPrefix(media, "text/")
-}
-
-// checkWait returns true along with a number of seconds if the server instructed us to wait
-// before retrying.
-func checkWait(resp *http.Response) (int, bool) {
- switch r := resp.StatusCode; {
- // any 500 error code and 429 can trigger a wait
- case r == http.StatusTooManyRequests, r >= 500:
- default:
- return 0, false
- }
- i, ok := retryAfterSeconds(resp)
- return i, ok
-}
-
-// retryAfterSeconds returns the value of the Retry-After header and true, or 0 and false if
-// the header was missing or not a valid number.
-func retryAfterSeconds(resp *http.Response) (int, bool) {
- if h := resp.Header.Get("Retry-After"); len(h) > 0 {
- if i, err := strconv.Atoi(h); err == nil {
- return i, true
- }
- }
- return 0, false
-}
-
-// Result contains the result of calling Request.Do().
-type Result struct {
- body []byte
- contentType string
- err error
- statusCode int
-
- decoder runtime.Decoder
-}
-
-// Raw returns the raw result.
-func (r Result) Raw() ([]byte, error) {
- return r.body, r.err
-}
-
-// Get returns the result as an object, which means it passes through the decoder.
-// If the returned object is of type Status and has .Status != StatusSuccess, the
-// additional information in Status will be used to enrich the error.
-func (r Result) Get() (runtime.Object, error) {
- if r.err != nil {
- // Check whether the result has a Status object in the body and prefer that.
- return nil, r.Error()
- }
- if r.decoder == nil {
- return nil, fmt.Errorf("serializer for %s doesn't exist", r.contentType)
- }
-
- // decode, but if the result is Status return that as an error instead.
- out, _, err := r.decoder.Decode(r.body, nil, nil)
- if err != nil {
- return nil, err
- }
- switch t := out.(type) {
- case *metav1.Status:
- // any status besides StatusSuccess is considered an error.
- if t.Status != metav1.StatusSuccess {
- return nil, errors.FromObject(t)
- }
- }
- return out, nil
-}
-
-// StatusCode returns the HTTP status code of the request. (Only valid if no
-// error was returned.)
-func (r Result) StatusCode(statusCode *int) Result {
- *statusCode = r.statusCode
- return r
-}
-
-// Into stores the result into obj, if possible. If obj is nil it is ignored.
-// If the returned object is of type Status and has .Status != StatusSuccess, the
-// additional information in Status will be used to enrich the error.
-func (r Result) Into(obj runtime.Object) error {
- if r.err != nil {
- // Check whether the result has a Status object in the body and prefer that.
- return r.Error()
- }
- if r.decoder == nil {
- return fmt.Errorf("serializer for %s doesn't exist", r.contentType)
- }
- if len(r.body) == 0 {
- return fmt.Errorf("0-length response with status code: %d and content type: %s",
- r.statusCode, r.contentType)
- }
-
- out, _, err := r.decoder.Decode(r.body, nil, obj)
- if err != nil || out == obj {
- return err
- }
- // if a different object is returned, see if it is Status and avoid double decoding
- // the object.
- switch t := out.(type) {
- case *metav1.Status:
- // any status besides StatusSuccess is considered an error.
- if t.Status != metav1.StatusSuccess {
- return errors.FromObject(t)
- }
- }
- return nil
-}
-
-// WasCreated updates the provided bool pointer to whether the server returned
-// 201 created or a different response.
-func (r Result) WasCreated(wasCreated *bool) Result {
- *wasCreated = r.statusCode == http.StatusCreated
- return r
-}
-
-// Error returns the error executing the request, nil if no error occurred.
-// If the returned object is of type Status and has Status != StatusSuccess, the
-// additional information in Status will be used to enrich the error.
-// See the Request.Do() comment for what errors you might get.
-func (r Result) Error() error {
- // if we have received an unexpected server error, and we have a body and decoder, we can try to extract
- // a Status object.
- if r.err == nil || !errors.IsUnexpectedServerError(r.err) || len(r.body) == 0 || r.decoder == nil {
- return r.err
- }
-
- // attempt to convert the body into a Status object
- // to be backwards compatible with old servers that do not return a version, default to "v1"
- out, _, err := r.decoder.Decode(r.body, &schema.GroupVersionKind{Version: "v1"}, nil)
- if err != nil {
- klog.V(5).Infof("body was not decodable (unable to check for Status): %v", err)
- return r.err
- }
- switch t := out.(type) {
- case *metav1.Status:
- // because we default the kind, we *must* check for StatusFailure
- if t.Status == metav1.StatusFailure {
- return errors.FromObject(t)
- }
- }
- return r.err
-}
-
-// NameMayNotBe specifies strings that cannot be used as names specified as path segments (like the REST API or etcd store)
-var NameMayNotBe = []string{".", ".."}
-
-// NameMayNotContain specifies substrings that cannot be used in names specified as path segments (like the REST API or etcd store)
-var NameMayNotContain = []string{"/", "%"}
-
-// IsValidPathSegmentName validates the name can be safely encoded as a path segment
-func IsValidPathSegmentName(name string) []string {
- for _, illegalName := range NameMayNotBe {
- if name == illegalName {
- return []string{fmt.Sprintf(`may not be '%s'`, illegalName)}
- }
- }
-
- var errors []string
- for _, illegalContent := range NameMayNotContain {
- if strings.Contains(name, illegalContent) {
- errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent))
- }
- }
-
- return errors
-}
-
-// IsValidPathSegmentPrefix validates the name can be used as a prefix for a name which will be encoded as a path segment
-// It does not check for exact matches with disallowed names, since an arbitrary suffix might make the name valid
-func IsValidPathSegmentPrefix(name string) []string {
- var errors []string
- for _, illegalContent := range NameMayNotContain {
- if strings.Contains(name, illegalContent) {
- errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent))
- }
- }
-
- return errors
-}
-
-// ValidatePathSegmentName validates the name can be safely encoded as a path segment
-func ValidatePathSegmentName(name string, prefix bool) []string {
- if prefix {
- return IsValidPathSegmentPrefix(name)
- }
- return IsValidPathSegmentName(name)
-}
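
request.go carried the fluent Request builder: each setter records the first error it hits, later calls become no-ops, and callers check the error once at the end of the chain. A standalone sketch of that error-accumulating pattern, independent of client-go; the Builder type below is illustrative, not part of the removed API:

package main

import (
	"fmt"
	"net/url"
	"path"
)

// Builder mimics the error-accumulating chain used by the removed rest.Request.
type Builder struct {
	segments []string
	err      error
}

// Segment appends a path segment, rejecting values that cannot be encoded safely.
func (b *Builder) Segment(s string) *Builder {
	if b.err != nil {
		return b // an earlier error short-circuits all later calls
	}
	if s == "" || s == "." || s == ".." {
		b.err = fmt.Errorf("invalid path segment %q", s)
		return b
	}
	b.segments = append(b.segments, s)
	return b
}

// URL materializes the path, surfacing any error recorded along the chain.
func (b *Builder) URL() (*url.URL, error) {
	if b.err != nil {
		return nil, b.err
	}
	return &url.URL{Path: "/" + path.Join(b.segments...)}, nil
}

func main() {
	u, err := new(Builder).Segment("namespaces").Segment("default").Segment("pods").URL()
	fmt.Println(u, err) // /namespaces/default/pods <nil>
}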
diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go
deleted file mode 100644
index de33ecbfc..000000000
--- a/vendor/k8s.io/client-go/rest/transport.go
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package rest
-
-import (
- "crypto/tls"
- "errors"
- "net/http"
-
- "k8s.io/client-go/plugin/pkg/client/auth/exec"
- "k8s.io/client-go/transport"
-)
-
-// TLSConfigFor returns a tls.Config that will provide the transport level security defined
-// by the provided Config. Will return nil if no transport level security is requested.
-func TLSConfigFor(config *Config) (*tls.Config, error) {
- cfg, err := config.TransportConfig()
- if err != nil {
- return nil, err
- }
- return transport.TLSConfigFor(cfg)
-}
-
-// TransportFor returns an http.RoundTripper that will provide the authentication
-// or transport level security defined by the provided Config. Will return the
-// default http.DefaultTransport if no special case behavior is needed.
-func TransportFor(config *Config) (http.RoundTripper, error) {
- cfg, err := config.TransportConfig()
- if err != nil {
- return nil, err
- }
- return transport.New(cfg)
-}
-
-// HTTPWrappersForConfig wraps a round tripper with any relevant layered behavior from the
-// config. Exposed to allow more clients that need HTTP-like behavior but then must hijack
-// the underlying connection (like WebSocket or HTTP2 clients). Pure HTTP clients should use
-// the higher level TransportFor or RESTClientFor methods.
-func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) {
- cfg, err := config.TransportConfig()
- if err != nil {
- return nil, err
- }
- return transport.HTTPWrappersForConfig(cfg, rt)
-}
-
-// TransportConfig converts a client config to an appropriate transport config.
-func (c *Config) TransportConfig() (*transport.Config, error) {
- conf := &transport.Config{
- UserAgent: c.UserAgent,
- Transport: c.Transport,
- WrapTransport: c.WrapTransport,
- TLS: transport.TLSConfig{
- Insecure: c.Insecure,
- ServerName: c.ServerName,
- CAFile: c.CAFile,
- CAData: c.CAData,
- CertFile: c.CertFile,
- CertData: c.CertData,
- KeyFile: c.KeyFile,
- KeyData: c.KeyData,
- },
- Username: c.Username,
- Password: c.Password,
- BearerToken: c.BearerToken,
- BearerTokenFile: c.BearerTokenFile,
- Impersonate: transport.ImpersonationConfig{
- UserName: c.Impersonate.UserName,
- Groups: c.Impersonate.Groups,
- Extra: c.Impersonate.Extra,
- },
- Dial: c.Dial,
- }
-
- if c.ExecProvider != nil && c.AuthProvider != nil {
- return nil, errors.New("execProvider and authProvider cannot be used in combination")
- }
-
- if c.ExecProvider != nil {
- provider, err := exec.GetAuthenticator(c.ExecProvider)
- if err != nil {
- return nil, err
- }
- if err := provider.UpdateTransportConfig(conf); err != nil {
- return nil, err
- }
- }
- if c.AuthProvider != nil {
- provider, err := GetAuthProvider(c.Host, c.AuthProvider, c.AuthConfigPersister)
- if err != nil {
- return nil, err
- }
- conf.Wrap(provider.WrapTransport)
- }
- return conf, nil
-}
-
-// Wrap adds a transport middleware function that will give the caller
-// an opportunity to wrap the underlying http.RoundTripper prior to the
-// first API call being made. The provided function is invoked after any
-// existing transport wrappers are invoked.
-func (c *Config) Wrap(fn transport.WrapperFunc) {
- c.WrapTransport = transport.Wrappers(c.WrapTransport, fn)
-}
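
transport.go translated a rest.Config into an http.RoundTripper. A minimal sketch of the removed TransportFor entry point; the host, token, and Insecure setting are placeholders for illustration:

package main

import (
	"fmt"
	"net/http"

	"k8s.io/client-go/rest"
)

func main() {
	// Insecure skips CA verification; acceptable only for local experiments.
	cfg := &rest.Config{
		Host:            "https://127.0.0.1:6443",
		TLSClientConfig: rest.TLSClientConfig{Insecure: true},
		BearerToken:     "placeholder-token",
	}

	// TransportFor wires the TLS and auth settings into a RoundTripper that a
	// plain *http.Client can use against the apiserver.
	rt, err := rest.TransportFor(cfg)
	if err != nil {
		panic(err)
	}
	client := &http.Client{Transport: rt}
	fmt.Printf("%T\n", client.Transport)
}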
diff --git a/vendor/k8s.io/client-go/rest/url_utils.go b/vendor/k8s.io/client-go/rest/url_utils.go
deleted file mode 100644
index a56d1838d..000000000
--- a/vendor/k8s.io/client-go/rest/url_utils.go
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package rest
-
-import (
- "fmt"
- "net/url"
- "path"
-
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// DefaultServerURL converts a host, host:port, or URL string to the default base server API path
-// to use with a Client at a given API version following the standard conventions for a
-// Kubernetes API.
-func DefaultServerURL(host, apiPath string, groupVersion schema.GroupVersion, defaultTLS bool) (*url.URL, string, error) {
- if host == "" {
- return nil, "", fmt.Errorf("host must be a URL or a host:port pair")
- }
- base := host
- hostURL, err := url.Parse(base)
- if err != nil || hostURL.Scheme == "" || hostURL.Host == "" {
- scheme := "http://"
- if defaultTLS {
- scheme = "https://"
- }
- hostURL, err = url.Parse(scheme + base)
- if err != nil {
- return nil, "", err
- }
- if hostURL.Path != "" && hostURL.Path != "/" {
- return nil, "", fmt.Errorf("host must be a URL or a host:port pair: %q", base)
- }
- }
-
- // hostURL.Path is optional; a non-empty Path is treated as a prefix that is to be applied to
- // all URIs used to access the host. this is useful when there's a proxy in front of the
- // apiserver that has relocated the apiserver endpoints, forwarding all requests from, for
- // example, /a/b/c to the apiserver. in this case the Path should be /a/b/c.
- //
- // if running without a frontend proxy (that changes the location of the apiserver), then
- // hostURL.Path should be blank.
- //
- // versionedAPIPath, a path relative to baseURL.Path, points to a versioned API base
- versionedAPIPath := DefaultVersionedAPIPath(apiPath, groupVersion)
-
- return hostURL, versionedAPIPath, nil
-}
-
-// DefaultVersionedAPIPath constructs the default path for the given group version, assuming the given
-// API path, following the standard conventions of the Kubernetes API.
-func DefaultVersionedAPIPath(apiPath string, groupVersion schema.GroupVersion) string {
- versionedAPIPath := path.Join("/", apiPath)
-
- // Add the version to the end of the path
- if len(groupVersion.Group) > 0 {
- versionedAPIPath = path.Join(versionedAPIPath, groupVersion.Group, groupVersion.Version)
-
- } else {
- versionedAPIPath = path.Join(versionedAPIPath, groupVersion.Version)
- }
-
- return versionedAPIPath
-}
-
-// defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It
-// requires Host and Version to be set prior to being called.
-func defaultServerUrlFor(config *Config) (*url.URL, string, error) {
- // TODO: move the default to secure when the apiserver supports TLS by default
- // config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA."
- hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0
- hasCert := len(config.CertFile) != 0 || len(config.CertData) != 0
- defaultTLS := hasCA || hasCert || config.Insecure
- host := config.Host
- if host == "" {
- host = "localhost"
- }
-
- if config.GroupVersion != nil {
- return DefaultServerURL(host, config.APIPath, *config.GroupVersion, defaultTLS)
- }
- return DefaultServerURL(host, config.APIPath, schema.GroupVersion{}, defaultTLS)
-}
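
url_utils.go normalized host strings into base URLs. A hedged sketch of the removed DefaultServerURL helper, showing how a bare host:port is given a scheme and combined with a versioned API path; the host and group/version values are arbitrary examples:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/rest"
)

func main() {
	// A bare host:port gets an https:// prefix when defaultTLS is true,
	// and the group/version is appended to the API path.
	base, versionedPath, err := rest.DefaultServerURL(
		"localhost:6443", "/apis",
		schema.GroupVersion{Group: "apps", Version: "v1"},
		true, // defaultTLS
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(base.String(), versionedPath) // https://localhost:6443 /apis/apps/v1
}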
diff --git a/vendor/k8s.io/client-go/rest/urlbackoff.go b/vendor/k8s.io/client-go/rest/urlbackoff.go
deleted file mode 100644
index d00e42f86..000000000
--- a/vendor/k8s.io/client-go/rest/urlbackoff.go
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package rest
-
-import (
- "net/url"
- "time"
-
- "k8s.io/apimachinery/pkg/util/sets"
- "k8s.io/client-go/util/flowcontrol"
- "k8s.io/klog"
-)
-
-// serverIsOverloadedSet is the set of response codes that we back off for.
-// In general these should be errors that indicate a server is overloaded.
-// These shouldn't be configured by any user; we set them by convention.
-var serverIsOverloadedSet = sets.NewInt(429)
-var maxResponseCode = 499
-
-type BackoffManager interface {
- UpdateBackoff(actualUrl *url.URL, err error, responseCode int)
- CalculateBackoff(actualUrl *url.URL) time.Duration
- Sleep(d time.Duration)
-}
-
-// URLBackoff struct implements the semantics on top of Backoff which
-// we need for URL specific exponential backoff.
-type URLBackoff struct {
- // Uses backoff as underlying implementation.
- Backoff *flowcontrol.Backoff
-}
-
-// NoBackoff is a stub implementation, can be used for mocking or else as a default.
-type NoBackoff struct {
-}
-
-func (n *NoBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) {
- // do nothing.
-}
-
-func (n *NoBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration {
- return 0 * time.Second
-}
-
-func (n *NoBackoff) Sleep(d time.Duration) {
- time.Sleep(d)
-}
-
-// Disable makes the backoff trivial, i.e., sets it to zero. This might be used
-// by tests which want to run 1000s of mock requests without slowing down.
-func (b *URLBackoff) Disable() {
- klog.V(4).Infof("Disabling backoff strategy")
- b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second)
-}
-
-// baseUrlKey returns the key which urls will be mapped to.
-// For example, 127.0.0.1:8080/api/v2/abcde -> 127.0.0.1:8080.
-func (b *URLBackoff) baseUrlKey(rawurl *url.URL) string {
- // Simple implementation for now, just the host.
- // We may backoff specific paths (i.e. "pods") differentially
- // in the future.
- host, err := url.Parse(rawurl.String())
- if err != nil {
- klog.V(4).Infof("Error extracting url: %v", rawurl)
- panic("bad url!")
- }
- return host.Host
-}
-
-// UpdateBackoff updates backoff metadata
-func (b *URLBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) {
- // range for retry counts that we store is [0,13]
- if responseCode > maxResponseCode || serverIsOverloadedSet.Has(responseCode) {
- b.Backoff.Next(b.baseUrlKey(actualUrl), b.Backoff.Clock.Now())
- return
- } else if responseCode >= 300 || err != nil {
- klog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err)
- }
-
- //If we got this far, there is no backoff required for this URL anymore.
- b.Backoff.Reset(b.baseUrlKey(actualUrl))
-}
-
-// CalculateBackoff takes a URL and backs off exponentially,
-// based on its knowledge of existing failures.
-func (b *URLBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration {
- return b.Backoff.Get(b.baseUrlKey(actualUrl))
-}
-
-func (b *URLBackoff) Sleep(d time.Duration) {
- b.Backoff.Clock.Sleep(d)
-}
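
urlbackoff.go kept a per-host exponential backoff keyed by the request's base URL. A sketch of the removed URLBackoff used in isolation; the 1s/30s window is an arbitrary choice for illustration:

package main

import (
	"fmt"
	"net/url"
	"time"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	u, _ := url.Parse("https://127.0.0.1:6443/api/v1/pods")

	b := &rest.URLBackoff{
		// Exponential backoff per host, starting at 1s and capped at 30s.
		Backoff: flowcontrol.NewBackOff(1*time.Second, 30*time.Second),
	}

	// An overload response (429) grows the delay recorded for this host...
	b.UpdateBackoff(u, nil, 429)
	fmt.Println(b.CalculateBackoff(u))

	// ...and a successful response resets it.
	b.UpdateBackoff(u, nil, 200)
	fmt.Println(b.CalculateBackoff(u))
}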
diff --git a/vendor/k8s.io/client-go/rest/watch/decoder.go b/vendor/k8s.io/client-go/rest/watch/decoder.go
deleted file mode 100644
index e95c020b2..000000000
--- a/vendor/k8s.io/client-go/rest/watch/decoder.go
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package versioned
-
-import (
- "fmt"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/serializer/streaming"
- "k8s.io/apimachinery/pkg/watch"
-)
-
-// Decoder implements the watch.Decoder interface for io.ReadClosers that
-// have contents which consist of a series of watchEvent objects encoded
-// with the given streaming decoder. The internal objects will then be
-// decoded by the embedded decoder.
-type Decoder struct {
- decoder streaming.Decoder
- embeddedDecoder runtime.Decoder
-}
-
-// NewDecoder creates a Decoder for the given streaming decoder and embedded decoder.
-func NewDecoder(decoder streaming.Decoder, embeddedDecoder runtime.Decoder) *Decoder {
- return &Decoder{
- decoder: decoder,
- embeddedDecoder: embeddedDecoder,
- }
-}
-
-// Decode blocks until it can return the next object in the reader. Returns an error
-// if the reader is closed or an object can't be decoded.
-func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) {
- var got metav1.WatchEvent
- res, _, err := d.decoder.Decode(nil, &got)
- if err != nil {
- return "", nil, err
- }
- if res != &got {
- return "", nil, fmt.Errorf("unable to decode to metav1.Event")
- }
- switch got.Type {
- case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error), string(watch.Bookmark):
- default:
- return "", nil, fmt.Errorf("got invalid watch event type: %v", got.Type)
- }
-
- obj, err := runtime.Decode(d.embeddedDecoder, got.Object.Raw)
- if err != nil {
- return "", nil, fmt.Errorf("unable to decode watch event: %v", err)
- }
- return watch.EventType(got.Type), obj, nil
-}
-
-// Close closes the underlying r.
-func (d *Decoder) Close() {
- d.decoder.Close()
-}
diff --git a/vendor/k8s.io/client-go/rest/watch/encoder.go b/vendor/k8s.io/client-go/rest/watch/encoder.go
deleted file mode 100644
index e55aa12d9..000000000
--- a/vendor/k8s.io/client-go/rest/watch/encoder.go
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package versioned
-
-import (
- "encoding/json"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/serializer/streaming"
- "k8s.io/apimachinery/pkg/watch"
-)
-
-// Encoder serializes watch.Events into io.Writer. The internal objects
-// are encoded using embedded encoder, and the outer Event is serialized
-// using encoder.
-// TODO: this type is only used by tests
-type Encoder struct {
- encoder streaming.Encoder
- embeddedEncoder runtime.Encoder
-}
-
-func NewEncoder(encoder streaming.Encoder, embeddedEncoder runtime.Encoder) *Encoder {
- return &Encoder{
- encoder: encoder,
- embeddedEncoder: embeddedEncoder,
- }
-}
-
-// Encode writes an event to the writer. Returns an error
-// if the writer is closed or an object can't be encoded.
-func (e *Encoder) Encode(event *watch.Event) error {
- data, err := runtime.Encode(e.embeddedEncoder, event.Object)
- if err != nil {
- return err
- }
- // FIXME: get rid of json.RawMessage.
- return e.encoder.Encode(&metav1.WatchEvent{
- Type: string(event.Type),
- Object: runtime.RawExtension{Raw: json.RawMessage(data)},
- })
-}
diff --git a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go
deleted file mode 100644
index c1ab45f33..000000000
--- a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package rest
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TLSClientConfig) DeepCopyInto(out *TLSClientConfig) {
- *out = *in
- if in.CertData != nil {
- in, out := &in.CertData, &out.CertData
- *out = make([]byte, len(*in))
- copy(*out, *in)
- }
- if in.KeyData != nil {
- in, out := &in.KeyData, &out.KeyData
- *out = make([]byte, len(*in))
- copy(*out, *in)
- }
- if in.CAData != nil {
- in, out := &in.CAData, &out.CAData
- *out = make([]byte, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSClientConfig.
-func (in *TLSClientConfig) DeepCopy() *TLSClientConfig {
- if in == nil {
- return nil
- }
- out := new(TLSClientConfig)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go
deleted file mode 100644
index 5871575a6..000000000
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// +k8s:deepcopy-gen=package
-
-package api
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
deleted file mode 100644
index 65a36936b..000000000
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package api
-
-import (
- "encoding/base64"
- "errors"
- "fmt"
- "io/ioutil"
- "os"
- "path"
- "path/filepath"
-)
-
-func init() {
- sDec, _ := base64.StdEncoding.DecodeString("REDACTED+")
- redactedBytes = []byte(string(sDec))
- sDec, _ = base64.StdEncoding.DecodeString("DATA+OMITTED")
- dataOmittedBytes = []byte(string(sDec))
-}
-
-// IsConfigEmpty returns true if the config is empty.
-func IsConfigEmpty(config *Config) bool {
- return len(config.AuthInfos) == 0 && len(config.Clusters) == 0 && len(config.Contexts) == 0 &&
- len(config.CurrentContext) == 0 &&
- len(config.Preferences.Extensions) == 0 && !config.Preferences.Colors &&
- len(config.Extensions) == 0
-}
-
-// MinifyConfig reads the current context and uses it to keep only the relevant pieces of config.
-// This is useful for making secrets based on kubeconfig files.
-func MinifyConfig(config *Config) error {
- if len(config.CurrentContext) == 0 {
- return errors.New("current-context must exist in order to minify")
- }
-
- currContext, exists := config.Contexts[config.CurrentContext]
- if !exists {
- return fmt.Errorf("cannot locate context %v", config.CurrentContext)
- }
-
- newContexts := map[string]*Context{}
- newContexts[config.CurrentContext] = currContext
-
- newClusters := map[string]*Cluster{}
- if len(currContext.Cluster) > 0 {
- if _, exists := config.Clusters[currContext.Cluster]; !exists {
- return fmt.Errorf("cannot locate cluster %v", currContext.Cluster)
- }
-
- newClusters[currContext.Cluster] = config.Clusters[currContext.Cluster]
- }
-
- newAuthInfos := map[string]*AuthInfo{}
- if len(currContext.AuthInfo) > 0 {
- if _, exists := config.AuthInfos[currContext.AuthInfo]; !exists {
- return fmt.Errorf("cannot locate user %v", currContext.AuthInfo)
- }
-
- newAuthInfos[currContext.AuthInfo] = config.AuthInfos[currContext.AuthInfo]
- }
-
- config.AuthInfos = newAuthInfos
- config.Clusters = newClusters
- config.Contexts = newContexts
-
- return nil
-}
-
-var (
- redactedBytes []byte
- dataOmittedBytes []byte
-)
-
-// ShortenConfig redacts raw data entries from the config object for a human-readable view.
-func ShortenConfig(config *Config) {
- // trick json encoder into printing a human readable string in the raw data
- // by base64 decoding what we want to print. Relies on implementation of
- // http://golang.org/pkg/encoding/json/#Marshal using base64 to encode []byte
- for key, authInfo := range config.AuthInfos {
- if len(authInfo.ClientKeyData) > 0 {
- authInfo.ClientKeyData = redactedBytes
- }
- if len(authInfo.ClientCertificateData) > 0 {
- authInfo.ClientCertificateData = redactedBytes
- }
- config.AuthInfos[key] = authInfo
- }
- for key, cluster := range config.Clusters {
- if len(cluster.CertificateAuthorityData) > 0 {
- cluster.CertificateAuthorityData = dataOmittedBytes
- }
- config.Clusters[key] = cluster
- }
-}
-
-// FlattenConfig changes the config object into a self-contained config (useful for making secrets).
-func FlattenConfig(config *Config) error {
- for key, authInfo := range config.AuthInfos {
- baseDir, err := MakeAbs(path.Dir(authInfo.LocationOfOrigin), "")
- if err != nil {
- return err
- }
-
- if err := FlattenContent(&authInfo.ClientCertificate, &authInfo.ClientCertificateData, baseDir); err != nil {
- return err
- }
- if err := FlattenContent(&authInfo.ClientKey, &authInfo.ClientKeyData, baseDir); err != nil {
- return err
- }
-
- config.AuthInfos[key] = authInfo
- }
- for key, cluster := range config.Clusters {
- baseDir, err := MakeAbs(path.Dir(cluster.LocationOfOrigin), "")
- if err != nil {
- return err
- }
-
- if err := FlattenContent(&cluster.CertificateAuthority, &cluster.CertificateAuthorityData, baseDir); err != nil {
- return err
- }
-
- config.Clusters[key] = cluster
- }
-
- return nil
-}
-
-func FlattenContent(path *string, contents *[]byte, baseDir string) error {
- if len(*path) != 0 {
- if len(*contents) > 0 {
- return errors.New("cannot have values for both path and contents")
- }
-
- var err error
- absPath := ResolvePath(*path, baseDir)
- *contents, err = ioutil.ReadFile(absPath)
- if err != nil {
- return err
- }
-
- *path = ""
- }
-
- return nil
-}
-
-// ResolvePath returns the path as an absolute path, relative to the given base directory.
-func ResolvePath(path string, base string) string {
- // Don't resolve empty paths
- if len(path) > 0 {
- // Don't resolve absolute paths
- if !filepath.IsAbs(path) {
- return filepath.Join(base, path)
- }
- }
-
- return path
-}
-
-func MakeAbs(path, base string) (string, error) {
- if filepath.IsAbs(path) {
- return path, nil
- }
- if len(base) == 0 {
- cwd, err := os.Getwd()
- if err != nil {
- return "", err
- }
- base = cwd
- }
- return filepath.Join(base, path), nil
-}
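A minimal sketch of how the removed helpers fit together: load a kubeconfig, MinifyConfig to keep only what current-context references, then FlattenConfig to inline certificate and key files so the result is self-contained (for example, for embedding in a secret). The file paths are placeholders, and clientcmd.LoadFromFile/clientcmd.WriteToFile are assumed from the sibling clientcmd package.

package main

import (
	"log"

	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	// Paths are placeholders.
	cfg, err := clientcmd.LoadFromFile("/home/user/.kube/config")
	if err != nil {
		log.Fatal(err)
	}

	// Keep only the cluster, user, and context reachable from current-context.
	if err := clientcmdapi.MinifyConfig(cfg); err != nil {
		log.Fatal(err)
	}

	// Inline certificate and key file contents so the config carries no file references.
	if err := clientcmdapi.FlattenConfig(cfg); err != nil {
		log.Fatal(err)
	}

	if err := clientcmd.WriteToFile(*cfg, "/tmp/minified-kubeconfig"); err != nil {
		log.Fatal(err)
	}
}
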
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/register.go b/vendor/k8s.io/client-go/tools/clientcmd/api/register.go
deleted file mode 100644
index 2eec3881c..000000000
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/register.go
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package api
-
-import (
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// SchemeGroupVersion is group version used to register these objects
-// TODO this should be in the "kubeconfig" group
-var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal}
-
-var (
- SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
- AddToScheme = SchemeBuilder.AddToScheme
-)
-
-func addKnownTypes(scheme *runtime.Scheme) error {
- scheme.AddKnownTypes(SchemeGroupVersion,
- &Config{},
- )
- return nil
-}
-
-func (obj *Config) GetObjectKind() schema.ObjectKind { return obj }
-func (obj *Config) SetGroupVersionKind(gvk schema.GroupVersionKind) {
- obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
-}
-func (obj *Config) GroupVersionKind() schema.GroupVersionKind {
- return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
-}
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
deleted file mode 100644
index 990a440c6..000000000
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package api
-
-import (
- "fmt"
-
- "k8s.io/apimachinery/pkg/runtime"
-)
-
-// Where possible, json tags match the cli argument names.
-// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted.
-
-// Config holds the information needed to connect to remote kubernetes clusters as a given user.
-// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-type Config struct {
- // Legacy field from pkg/api/types.go TypeMeta.
- // TODO(jlowdermilk): remove this after eliminating downstream dependencies.
- // +optional
- Kind string `json:"kind,omitempty"`
- // Legacy field from pkg/api/types.go TypeMeta.
- // TODO(jlowdermilk): remove this after eliminating downstream dependencies.
- // +optional
- APIVersion string `json:"apiVersion,omitempty"`
-	// Preferences holds general information to be used for cli interactions
- Preferences Preferences `json:"preferences"`
- // Clusters is a map of referencable names to cluster configs
- Clusters map[string]*Cluster `json:"clusters"`
- // AuthInfos is a map of referencable names to user configs
- AuthInfos map[string]*AuthInfo `json:"users"`
- // Contexts is a map of referencable names to context configs
- Contexts map[string]*Context `json:"contexts"`
- // CurrentContext is the name of the context that you would like to use by default
- CurrentContext string `json:"current-context"`
- // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
- // +optional
- Extensions map[string]runtime.Object `json:"extensions,omitempty"`
-}
-
-// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
-type Preferences struct {
- // +optional
- Colors bool `json:"colors,omitempty"`
- // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
- // +optional
- Extensions map[string]runtime.Object `json:"extensions,omitempty"`
-}
-
-// Cluster contains information about how to communicate with a kubernetes cluster
-type Cluster struct {
- // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
- LocationOfOrigin string
- // Server is the address of the kubernetes cluster (https://hostname:port).
- Server string `json:"server"`
- // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
- // +optional
- InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
- // CertificateAuthority is the path to a cert file for the certificate authority.
- // +optional
- CertificateAuthority string `json:"certificate-authority,omitempty"`
- // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
- // +optional
- CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
- // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
- // +optional
- Extensions map[string]runtime.Object `json:"extensions,omitempty"`
-}
-
-// AuthInfo contains information that describes identity information. This is used to tell the kubernetes cluster who you are.
-type AuthInfo struct {
- // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
- LocationOfOrigin string
- // ClientCertificate is the path to a client cert file for TLS.
- // +optional
- ClientCertificate string `json:"client-certificate,omitempty"`
- // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
- // +optional
- ClientCertificateData []byte `json:"client-certificate-data,omitempty"`
- // ClientKey is the path to a client key file for TLS.
- // +optional
- ClientKey string `json:"client-key,omitempty"`
- // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
- // +optional
- ClientKeyData []byte `json:"client-key-data,omitempty"`
- // Token is the bearer token for authentication to the kubernetes cluster.
- // +optional
- Token string `json:"token,omitempty"`
- // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence.
- // +optional
- TokenFile string `json:"tokenFile,omitempty"`
- // Impersonate is the username to act-as.
- // +optional
- Impersonate string `json:"act-as,omitempty"`
-	// ImpersonateGroups is the groups to impersonate.
- // +optional
- ImpersonateGroups []string `json:"act-as-groups,omitempty"`
- // ImpersonateUserExtra contains additional information for impersonated user.
- // +optional
- ImpersonateUserExtra map[string][]string `json:"act-as-user-extra,omitempty"`
- // Username is the username for basic authentication to the kubernetes cluster.
- // +optional
- Username string `json:"username,omitempty"`
- // Password is the password for basic authentication to the kubernetes cluster.
- // +optional
- Password string `json:"password,omitempty"`
- // AuthProvider specifies a custom authentication plugin for the kubernetes cluster.
- // +optional
- AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"`
- // Exec specifies a custom exec-based authentication plugin for the kubernetes cluster.
- // +optional
- Exec *ExecConfig `json:"exec,omitempty"`
- // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
- // +optional
- Extensions map[string]runtime.Object `json:"extensions,omitempty"`
-}
-
-// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with)
-type Context struct {
- // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
- LocationOfOrigin string
- // Cluster is the name of the cluster for this context
- Cluster string `json:"cluster"`
- // AuthInfo is the name of the authInfo for this context
- AuthInfo string `json:"user"`
- // Namespace is the default namespace to use on unspecified requests
- // +optional
- Namespace string `json:"namespace,omitempty"`
- // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
- // +optional
- Extensions map[string]runtime.Object `json:"extensions,omitempty"`
-}
-
-// AuthProviderConfig holds the configuration for a specified auth provider.
-type AuthProviderConfig struct {
- Name string `json:"name"`
- // +optional
- Config map[string]string `json:"config,omitempty"`
-}
-
-var _ fmt.Stringer = new(AuthProviderConfig)
-var _ fmt.GoStringer = new(AuthProviderConfig)
-
-// GoString implements fmt.GoStringer and sanitizes sensitive fields of
-// AuthProviderConfig to prevent accidental leaking via logs.
-func (c AuthProviderConfig) GoString() string {
- return c.String()
-}
-
-// String implements fmt.Stringer and sanitizes sensitive fields of
-// AuthProviderConfig to prevent accidental leaking via logs.
-func (c AuthProviderConfig) String() string {
- cfg := "<nil>"
- if c.Config != nil {
- cfg = "--- REDACTED ---"
- }
- return fmt.Sprintf("api.AuthProviderConfig{Name: %q, Config: map[string]string{%s}}", c.Name, cfg)
-}
-
-// ExecConfig specifies a command to provide client credentials. The command is exec'd
-// and outputs structured stdout holding credentials.
-//
-// See the client.authentication.k8s.io API group for specifications of the exact input
-// and output format
-type ExecConfig struct {
- // Command to execute.
- Command string `json:"command"`
- // Arguments to pass to the command when executing it.
- // +optional
- Args []string `json:"args"`
- // Env defines additional environment variables to expose to the process. These
- // are unioned with the host's environment, as well as variables client-go uses
- // to pass argument to the plugin.
- // +optional
- Env []ExecEnvVar `json:"env"`
-
- // Preferred input version of the ExecInfo. The returned ExecCredentials MUST use
- // the same encoding version as the input.
- APIVersion string `json:"apiVersion,omitempty"`
-}
-
-var _ fmt.Stringer = new(ExecConfig)
-var _ fmt.GoStringer = new(ExecConfig)
-
-// GoString implements fmt.GoStringer and sanitizes sensitive fields of
-// ExecConfig to prevent accidental leaking via logs.
-func (c ExecConfig) GoString() string {
- return c.String()
-}
-
-// String implements fmt.Stringer and sanitizes sensitive fields of ExecConfig
-// to prevent accidental leaking via logs.
-func (c ExecConfig) String() string {
- var args []string
- if len(c.Args) > 0 {
- args = []string{"--- REDACTED ---"}
- }
- env := "[]ExecEnvVar(nil)"
- if len(c.Env) > 0 {
- env = "[]ExecEnvVar{--- REDACTED ---}"
- }
- return fmt.Sprintf("api.AuthProviderConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q}", c.Command, args, env, c.APIVersion)
-}
-
-// ExecEnvVar is used for setting environment variables when executing an exec-based
-// credential plugin.
-type ExecEnvVar struct {
- Name string `json:"name"`
- Value string `json:"value"`
-}
-
-// NewConfig is a convenience function that returns a new Config object with non-nil maps
-func NewConfig() *Config {
- return &Config{
- Preferences: *NewPreferences(),
- Clusters: make(map[string]*Cluster),
- AuthInfos: make(map[string]*AuthInfo),
- Contexts: make(map[string]*Context),
- Extensions: make(map[string]runtime.Object),
- }
-}
-
-// NewContext is a convenience function that returns a new Context
-// object with non-nil maps
-func NewContext() *Context {
- return &Context{Extensions: make(map[string]runtime.Object)}
-}
-
-// NewCluster is a convenience function that returns a new Cluster
-// object with non-nil maps
-func NewCluster() *Cluster {
- return &Cluster{Extensions: make(map[string]runtime.Object)}
-}
-
-// NewAuthInfo is a convenience function that returns a new AuthInfo
-// object with non-nil maps
-func NewAuthInfo() *AuthInfo {
- return &AuthInfo{
- Extensions: make(map[string]runtime.Object),
- ImpersonateUserExtra: make(map[string][]string),
- }
-}
-
-// NewPreferences is a convenience function that returns a new
-// Preferences object with non-nil maps
-func NewPreferences() *Preferences {
- return &Preferences{Extensions: make(map[string]runtime.Object)}
-}
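The New* constructors above exist so callers can assemble a Config without hitting nil maps. A small illustrative sketch, with all names, endpoints, and credentials as placeholders:

package main

import (
	"fmt"

	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	cfg := clientcmdapi.NewConfig()

	cluster := clientcmdapi.NewCluster()
	cluster.Server = "https://k8s.example:6443"             // placeholder endpoint
	cluster.CertificateAuthority = "/etc/kubernetes/ca.crt" // path form; the *Data fields would override it

	user := clientcmdapi.NewAuthInfo()
	user.Token = "REDACTED" // placeholder bearer token

	ctx := clientcmdapi.NewContext()
	ctx.Cluster = "demo"
	ctx.AuthInfo = "demo-user"
	ctx.Namespace = "default"

	cfg.Clusters["demo"] = cluster
	cfg.AuthInfos["demo-user"] = user
	cfg.Contexts["demo"] = ctx
	cfg.CurrentContext = "demo"

	fmt.Println(clientcmdapi.IsConfigEmpty(cfg)) // false
}
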
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
deleted file mode 100644
index 3240a7a98..000000000
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,324 +0,0 @@
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package api
-
-import (
- runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AuthInfo) DeepCopyInto(out *AuthInfo) {
- *out = *in
- if in.ClientCertificateData != nil {
- in, out := &in.ClientCertificateData, &out.ClientCertificateData
- *out = make([]byte, len(*in))
- copy(*out, *in)
- }
- if in.ClientKeyData != nil {
- in, out := &in.ClientKeyData, &out.ClientKeyData
- *out = make([]byte, len(*in))
- copy(*out, *in)
- }
- if in.ImpersonateGroups != nil {
- in, out := &in.ImpersonateGroups, &out.ImpersonateGroups
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.ImpersonateUserExtra != nil {
- in, out := &in.ImpersonateUserExtra, &out.ImpersonateUserExtra
- *out = make(map[string][]string, len(*in))
- for key, val := range *in {
- var outVal []string
- if val == nil {
- (*out)[key] = nil
- } else {
- in, out := &val, &outVal
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- (*out)[key] = outVal
- }
- }
- if in.AuthProvider != nil {
- in, out := &in.AuthProvider, &out.AuthProvider
- *out = new(AuthProviderConfig)
- (*in).DeepCopyInto(*out)
- }
- if in.Exec != nil {
- in, out := &in.Exec, &out.Exec
- *out = new(ExecConfig)
- (*in).DeepCopyInto(*out)
- }
- if in.Extensions != nil {
- in, out := &in.Extensions, &out.Extensions
- *out = make(map[string]runtime.Object, len(*in))
- for key, val := range *in {
- if val == nil {
- (*out)[key] = nil
- } else {
- (*out)[key] = val.DeepCopyObject()
- }
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthInfo.
-func (in *AuthInfo) DeepCopy() *AuthInfo {
- if in == nil {
- return nil
- }
- out := new(AuthInfo)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *AuthProviderConfig) DeepCopyInto(out *AuthProviderConfig) {
- *out = *in
- if in.Config != nil {
- in, out := &in.Config, &out.Config
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthProviderConfig.
-func (in *AuthProviderConfig) DeepCopy() *AuthProviderConfig {
- if in == nil {
- return nil
- }
- out := new(AuthProviderConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Cluster) DeepCopyInto(out *Cluster) {
- *out = *in
- if in.CertificateAuthorityData != nil {
- in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData
- *out = make([]byte, len(*in))
- copy(*out, *in)
- }
- if in.Extensions != nil {
- in, out := &in.Extensions, &out.Extensions
- *out = make(map[string]runtime.Object, len(*in))
- for key, val := range *in {
- if val == nil {
- (*out)[key] = nil
- } else {
- (*out)[key] = val.DeepCopyObject()
- }
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
-func (in *Cluster) DeepCopy() *Cluster {
- if in == nil {
- return nil
- }
- out := new(Cluster)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Config) DeepCopyInto(out *Config) {
- *out = *in
- in.Preferences.DeepCopyInto(&out.Preferences)
- if in.Clusters != nil {
- in, out := &in.Clusters, &out.Clusters
- *out = make(map[string]*Cluster, len(*in))
- for key, val := range *in {
- var outVal *Cluster
- if val == nil {
- (*out)[key] = nil
- } else {
- in, out := &val, &outVal
- *out = new(Cluster)
- (*in).DeepCopyInto(*out)
- }
- (*out)[key] = outVal
- }
- }
- if in.AuthInfos != nil {
- in, out := &in.AuthInfos, &out.AuthInfos
- *out = make(map[string]*AuthInfo, len(*in))
- for key, val := range *in {
- var outVal *AuthInfo
- if val == nil {
- (*out)[key] = nil
- } else {
- in, out := &val, &outVal
- *out = new(AuthInfo)
- (*in).DeepCopyInto(*out)
- }
- (*out)[key] = outVal
- }
- }
- if in.Contexts != nil {
- in, out := &in.Contexts, &out.Contexts
- *out = make(map[string]*Context, len(*in))
- for key, val := range *in {
- var outVal *Context
- if val == nil {
- (*out)[key] = nil
- } else {
- in, out := &val, &outVal
- *out = new(Context)
- (*in).DeepCopyInto(*out)
- }
- (*out)[key] = outVal
- }
- }
- if in.Extensions != nil {
- in, out := &in.Extensions, &out.Extensions
- *out = make(map[string]runtime.Object, len(*in))
- for key, val := range *in {
- if val == nil {
- (*out)[key] = nil
- } else {
- (*out)[key] = val.DeepCopyObject()
- }
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
-func (in *Config) DeepCopy() *Config {
- if in == nil {
- return nil
- }
- out := new(Config)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Config) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Context) DeepCopyInto(out *Context) {
- *out = *in
- if in.Extensions != nil {
- in, out := &in.Extensions, &out.Extensions
- *out = make(map[string]runtime.Object, len(*in))
- for key, val := range *in {
- if val == nil {
- (*out)[key] = nil
- } else {
- (*out)[key] = val.DeepCopyObject()
- }
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Context.
-func (in *Context) DeepCopy() *Context {
- if in == nil {
- return nil
- }
- out := new(Context)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExecConfig) DeepCopyInto(out *ExecConfig) {
- *out = *in
- if in.Args != nil {
- in, out := &in.Args, &out.Args
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Env != nil {
- in, out := &in.Env, &out.Env
- *out = make([]ExecEnvVar, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecConfig.
-func (in *ExecConfig) DeepCopy() *ExecConfig {
- if in == nil {
- return nil
- }
- out := new(ExecConfig)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExecEnvVar) DeepCopyInto(out *ExecEnvVar) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecEnvVar.
-func (in *ExecEnvVar) DeepCopy() *ExecEnvVar {
- if in == nil {
- return nil
- }
- out := new(ExecEnvVar)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Preferences) DeepCopyInto(out *Preferences) {
- *out = *in
- if in.Extensions != nil {
- in, out := &in.Extensions, &out.Extensions
- *out = make(map[string]runtime.Object, len(*in))
- for key, val := range *in {
- if val == nil {
- (*out)[key] = nil
- } else {
- (*out)[key] = val.DeepCopyObject()
- }
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preferences.
-func (in *Preferences) DeepCopy() *Preferences {
- if in == nil {
- return nil
- }
- out := new(Preferences)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/vendor/k8s.io/client-go/tools/metrics/OWNERS b/vendor/k8s.io/client-go/tools/metrics/OWNERS
deleted file mode 100644
index f150be536..000000000
--- a/vendor/k8s.io/client-go/tools/metrics/OWNERS
+++ /dev/null
@@ -1,9 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-reviewers:
-- wojtek-t
-- eparis
-- krousey
-- jayunit100
-- fgrzadkowski
-- tmrts
diff --git a/vendor/k8s.io/client-go/tools/metrics/metrics.go b/vendor/k8s.io/client-go/tools/metrics/metrics.go
deleted file mode 100644
index a01306c65..000000000
--- a/vendor/k8s.io/client-go/tools/metrics/metrics.go
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package metrics provides abstractions for registering which metrics
-// to record.
-package metrics
-
-import (
- "net/url"
- "sync"
- "time"
-)
-
-var registerMetrics sync.Once
-
-// LatencyMetric observes client latency partitioned by verb and url.
-type LatencyMetric interface {
- Observe(verb string, u url.URL, latency time.Duration)
-}
-
-// ResultMetric counts response codes partitioned by method and host.
-type ResultMetric interface {
- Increment(code string, method string, host string)
-}
-
-var (
- // RequestLatency is the latency metric that rest clients will update.
- RequestLatency LatencyMetric = noopLatency{}
- // RequestResult is the result metric that rest clients will update.
- RequestResult ResultMetric = noopResult{}
-)
-
-// Register registers metrics for the rest client to use. This can
-// only be called once.
-func Register(lm LatencyMetric, rm ResultMetric) {
- registerMetrics.Do(func() {
- RequestLatency = lm
- RequestResult = rm
- })
-}
-
-type noopLatency struct{}
-
-func (noopLatency) Observe(string, url.URL, time.Duration) {}
-
-type noopResult struct{}
-
-func (noopResult) Increment(string, string, string) {}
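Clients plug their own metrics sinks in by implementing the two interfaces above and calling Register once at startup. A minimal sketch that just logs; the logging sink is an illustrative assumption, not a real client-go type:

package main

import (
	"log"
	"net/url"
	"time"

	"k8s.io/client-go/tools/metrics"
)

// logLatency satisfies LatencyMetric by logging each request's verb, path, and duration.
type logLatency struct{}

func (logLatency) Observe(verb string, u url.URL, latency time.Duration) {
	log.Printf("%s %s took %s", verb, u.Path, latency)
}

// logResult satisfies ResultMetric by logging response codes per method and host.
type logResult struct{}

func (logResult) Increment(code, method, host string) {
	log.Printf("%s %s -> %s", method, host, code)
}

func main() {
	// Register is guarded by sync.Once, so only the first call takes effect.
	metrics.Register(logLatency{}, logResult{})
}
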
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/doc.go b/vendor/k8s.io/client-go/tools/remotecommand/doc.go
deleted file mode 100644
index ac06a9cd3..000000000
--- a/vendor/k8s.io/client-go/tools/remotecommand/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package remotecommand adds support for executing commands in containers,
-// with support for separate stdin, stdout, and stderr streams, as well as
-// TTY.
-package remotecommand // import "k8s.io/client-go/tools/remotecommand"
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go b/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go
deleted file mode 100644
index 360276b65..000000000
--- a/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package remotecommand
-
-import (
- "fmt"
- "io"
- "io/ioutil"
-
- "k8s.io/apimachinery/pkg/util/runtime"
-)
-
-// errorStreamDecoder interprets the data on the error channel and creates a go error object from it.
-type errorStreamDecoder interface {
- decode(message []byte) error
-}
-
-// watchErrorStream watches the errorStream for remote command error data,
-// decodes it with the given errorStreamDecoder, sends the decoded error (or nil if the remote
-// command exited successfully) to the returned error channel, and closes it.
-// This function returns immediately.
-func watchErrorStream(errorStream io.Reader, d errorStreamDecoder) chan error {
- errorChan := make(chan error)
-
- go func() {
- defer runtime.HandleCrash()
-
- message, err := ioutil.ReadAll(errorStream)
- switch {
- case err != nil && err != io.EOF:
- errorChan <- fmt.Errorf("error reading from error stream: %s", err)
- case len(message) > 0:
- errorChan <- d.decode(message)
- default:
- errorChan <- nil
- }
- close(errorChan)
- }()
-
- return errorChan
-}
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/reader.go b/vendor/k8s.io/client-go/tools/remotecommand/reader.go
deleted file mode 100644
index d1f1be34c..000000000
--- a/vendor/k8s.io/client-go/tools/remotecommand/reader.go
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package remotecommand
-
-import (
- "io"
-)
-
-// readerWrapper delegates to an io.Reader so that only the io.Reader interface is implemented,
-// to keep io.Copy from doing things we don't want when copying from the reader to the data stream.
-//
-// If the Stdin io.Reader provided to remotecommand implements a WriteTo function (like bytes.Buffer does[1]),
-// io.Copy calls that method[2] to attempt to write the entire buffer to the stream in one call.
-// That results in an oversized call to spdystream.Stream#Write [3],
-// which results in a single oversized data frame[4] that is too large.
-//
-// [1] https://golang.org/pkg/bytes/#Buffer.WriteTo
-// [2] https://golang.org/pkg/io/#Copy
-// [3] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/stream.go#L66-L73
-// [4] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/spdy/write.go#L302-L304
-type readerWrapper struct {
- reader io.Reader
-}
-
-func (r readerWrapper) Read(p []byte) (int, error) {
- return r.reader.Read(p)
-}
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go
deleted file mode 100644
index 892d8d105..000000000
--- a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package remotecommand
-
-import (
- "fmt"
- "io"
- "net/http"
- "net/url"
-
- "k8s.io/klog"
-
- "k8s.io/apimachinery/pkg/util/httpstream"
- "k8s.io/apimachinery/pkg/util/remotecommand"
- restclient "k8s.io/client-go/rest"
- spdy "k8s.io/client-go/transport/spdy"
-)
-
-// StreamOptions holds information pertaining to the current streaming session:
-// input/output streams, if the client is requesting a TTY, and a terminal size queue to
-// support terminal resizing.
-type StreamOptions struct {
- Stdin io.Reader
- Stdout io.Writer
- Stderr io.Writer
- Tty bool
- TerminalSizeQueue TerminalSizeQueue
-}
-
-// Executor is an interface for transporting shell-style streams.
-type Executor interface {
- // Stream initiates the transport of the standard shell streams. It will transport any
- // non-nil stream to a remote system, and return an error if a problem occurs. If tty
- // is set, the stderr stream is not used (raw TTY manages stdout and stderr over the
- // stdout stream).
- Stream(options StreamOptions) error
-}
-
-type streamCreator interface {
- CreateStream(headers http.Header) (httpstream.Stream, error)
-}
-
-type streamProtocolHandler interface {
- stream(conn streamCreator) error
-}
-
-// streamExecutor handles transporting standard shell streams over an httpstream connection.
-type streamExecutor struct {
- upgrader spdy.Upgrader
- transport http.RoundTripper
-
- method string
- url *url.URL
- protocols []string
-}
-
-// NewSPDYExecutor connects to the provided server and upgrades the connection to
-// multiplexed bidirectional streams.
-func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) {
- wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config)
- if err != nil {
- return nil, err
- }
- return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url)
-}
-
-// NewSPDYExecutorForTransports connects to the provided server using the given transport,
-// upgrades the response using the given upgrader to multiplexed bidirectional streams.
-func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) {
- return NewSPDYExecutorForProtocols(
- transport, upgrader, method, url,
- remotecommand.StreamProtocolV4Name,
- remotecommand.StreamProtocolV3Name,
- remotecommand.StreamProtocolV2Name,
- remotecommand.StreamProtocolV1Name,
- )
-}
-
-// NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to
-// multiplexed bidirectional streams using only the provided protocols. Exposed for testing, most
-// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports.
-func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) {
- return &streamExecutor{
- upgrader: upgrader,
- transport: transport,
- method: method,
- url: url,
- protocols: protocols,
- }, nil
-}
-
-// Stream opens a protocol streamer to the server and streams until a client closes
-// the connection or the server disconnects.
-func (e *streamExecutor) Stream(options StreamOptions) error {
- req, err := http.NewRequest(e.method, e.url.String(), nil)
- if err != nil {
- return fmt.Errorf("error creating request: %v", err)
- }
-
- conn, protocol, err := spdy.Negotiate(
- e.upgrader,
- &http.Client{Transport: e.transport},
- req,
- e.protocols...,
- )
- if err != nil {
- return err
- }
- defer conn.Close()
-
- var streamer streamProtocolHandler
-
- switch protocol {
- case remotecommand.StreamProtocolV4Name:
- streamer = newStreamProtocolV4(options)
- case remotecommand.StreamProtocolV3Name:
- streamer = newStreamProtocolV3(options)
- case remotecommand.StreamProtocolV2Name:
- streamer = newStreamProtocolV2(options)
- case "":
- klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name)
- fallthrough
- case remotecommand.StreamProtocolV1Name:
- streamer = newStreamProtocolV1(options)
- }
-
- return streamer.stream(conn)
-}
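End to end, the removed Executor is typically used by building a rest config, constructing the exec or attach URL on the API server, and then calling Stream with the desired stdio wiring. A minimal sketch; the kubeconfig path and exec URL are placeholders, and clientcmd.BuildConfigFromFlags is assumed from the clientcmd package.

package main

import (
	"log"
	"net/url"
	"os"

	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/remotecommand"
)

func main() {
	// Kubeconfig path and exec URL are placeholders for illustration.
	config, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
	if err != nil {
		log.Fatal(err)
	}

	execURL, err := url.Parse("https://apiserver.example:6443/api/v1/namespaces/default/pods/mypod/exec?command=sh&stdin=true&stdout=true&tty=true")
	if err != nil {
		log.Fatal(err)
	}

	exec, err := remotecommand.NewSPDYExecutor(config, "POST", execURL)
	if err != nil {
		log.Fatal(err)
	}

	// With Tty set, stderr is multiplexed onto the stdout stream by the protocol.
	if err := exec.Stream(remotecommand.StreamOptions{
		Stdin:  os.Stdin,
		Stdout: os.Stdout,
		Tty:    true,
	}); err != nil {
		log.Fatal(err)
	}
}
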
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/resize.go b/vendor/k8s.io/client-go/tools/remotecommand/resize.go
deleted file mode 100644
index c838f21ba..000000000
--- a/vendor/k8s.io/client-go/tools/remotecommand/resize.go
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package remotecommand
-
-// TerminalSize and TerminalSizeQueue were part of k8s.io/kubernetes/pkg/util/term
-// and were moved in order to decouple the client from other term dependencies.
-
-// TerminalSize represents the width and height of a terminal.
-type TerminalSize struct {
- Width uint16
- Height uint16
-}
-
-// TerminalSizeQueue is capable of returning terminal resize events as they occur.
-type TerminalSizeQueue interface {
- // Next returns the new terminal size after the terminal has been resized. It returns nil when
- // monitoring has been stopped.
- Next() *TerminalSize
-}
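A TerminalSizeQueue is usually backed by a channel fed from a window-resize handler; Next blocks until a new size arrives and returns nil once the channel closes. A minimal, self-contained sketch under that assumption (the single hard-coded size stands in for real resize events):

package main

import (
	"fmt"

	"k8s.io/client-go/tools/remotecommand"
)

// sizeQueue implements remotecommand.TerminalSizeQueue on top of a channel.
type sizeQueue struct {
	resize chan remotecommand.TerminalSize
}

// Next blocks until a resize event arrives; a closed channel means monitoring
// has stopped, which is signalled to the caller by returning nil.
func (q *sizeQueue) Next() *remotecommand.TerminalSize {
	size, ok := <-q.resize
	if !ok {
		return nil
	}
	return &size
}

func main() {
	q := &sizeQueue{resize: make(chan remotecommand.TerminalSize, 1)}
	q.resize <- remotecommand.TerminalSize{Width: 80, Height: 24}
	close(q.resize)

	for s := q.Next(); s != nil; s = q.Next() {
		fmt.Printf("terminal resized to %dx%d\n", s.Width, s.Height)
	}
}
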
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v1.go b/vendor/k8s.io/client-go/tools/remotecommand/v1.go
deleted file mode 100644
index 4120f1f5f..000000000
--- a/vendor/k8s.io/client-go/tools/remotecommand/v1.go
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package remotecommand
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
-
- "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/util/httpstream"
- "k8s.io/klog"
-)
-
-// streamProtocolV1 implements the first version of the streaming exec & attach
-// protocol. This version has some bugs, such as not being able to detect when
-// non-interactive stdin data has ended. See http://issues.k8s.io/13394 and
-// http://issues.k8s.io/13395 for more details.
-type streamProtocolV1 struct {
- StreamOptions
-
- errorStream httpstream.Stream
- remoteStdin httpstream.Stream
- remoteStdout httpstream.Stream
- remoteStderr httpstream.Stream
-}
-
-var _ streamProtocolHandler = &streamProtocolV1{}
-
-func newStreamProtocolV1(options StreamOptions) streamProtocolHandler {
- return &streamProtocolV1{
- StreamOptions: options,
- }
-}
-
-func (p *streamProtocolV1) stream(conn streamCreator) error {
- doneChan := make(chan struct{}, 2)
- errorChan := make(chan error)
-
- cp := func(s string, dst io.Writer, src io.Reader) {
- klog.V(6).Infof("Copying %s", s)
- defer klog.V(6).Infof("Done copying %s", s)
- if _, err := io.Copy(dst, src); err != nil && err != io.EOF {
- klog.Errorf("Error copying %s: %v", s, err)
- }
- if s == v1.StreamTypeStdout || s == v1.StreamTypeStderr {
- doneChan <- struct{}{}
- }
- }
-
- // set up all the streams first
- var err error
- headers := http.Header{}
- headers.Set(v1.StreamType, v1.StreamTypeError)
- p.errorStream, err = conn.CreateStream(headers)
- if err != nil {
- return err
- }
- defer p.errorStream.Reset()
-
- // Create all the streams first, then start the copy goroutines. The server doesn't start its copy
- // goroutines until it's received all of the streams. If the client creates the stdin stream and
- // immediately begins copying stdin data to the server, it's possible to overwhelm and wedge the
- // spdy frame handler in the server so that it is full of unprocessed frames. The frames aren't
- // getting processed because the server hasn't started its copying, and it won't do that until it
- // gets all the streams. By creating all the streams first, we ensure that the server is ready to
- // process data before the client starts sending any. See https://issues.k8s.io/16373 for more info.
- if p.Stdin != nil {
- headers.Set(v1.StreamType, v1.StreamTypeStdin)
- p.remoteStdin, err = conn.CreateStream(headers)
- if err != nil {
- return err
- }
- defer p.remoteStdin.Reset()
- }
-
- if p.Stdout != nil {
- headers.Set(v1.StreamType, v1.StreamTypeStdout)
- p.remoteStdout, err = conn.CreateStream(headers)
- if err != nil {
- return err
- }
- defer p.remoteStdout.Reset()
- }
-
- if p.Stderr != nil && !p.Tty {
- headers.Set(v1.StreamType, v1.StreamTypeStderr)
- p.remoteStderr, err = conn.CreateStream(headers)
- if err != nil {
- return err
- }
- defer p.remoteStderr.Reset()
- }
-
- // now that all the streams have been created, proceed with reading & copying
-
- // always read from errorStream
- go func() {
- message, err := ioutil.ReadAll(p.errorStream)
- if err != nil && err != io.EOF {
- errorChan <- fmt.Errorf("Error reading from error stream: %s", err)
- return
- }
- if len(message) > 0 {
- errorChan <- fmt.Errorf("Error executing remote command: %s", message)
- return
- }
- }()
-
- if p.Stdin != nil {
- // TODO this goroutine will never exit cleanly (the io.Copy never unblocks)
- // because stdin is not closed until the process exits. If we try to call
- // stdin.Close(), it returns no error but doesn't unblock the copy. It will
- // exit when the process exits, instead.
- go cp(v1.StreamTypeStdin, p.remoteStdin, readerWrapper{p.Stdin})
- }
-
- waitCount := 0
- completedStreams := 0
-
- if p.Stdout != nil {
- waitCount++
- go cp(v1.StreamTypeStdout, p.Stdout, p.remoteStdout)
- }
-
- if p.Stderr != nil && !p.Tty {
- waitCount++
- go cp(v1.StreamTypeStderr, p.Stderr, p.remoteStderr)
- }
-
-Loop:
- for {
- select {
- case <-doneChan:
- completedStreams++
- if completedStreams == waitCount {
- break Loop
- }
- case err := <-errorChan:
- return err
- }
- }
-
- return nil
-}
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v2.go b/vendor/k8s.io/client-go/tools/remotecommand/v2.go
deleted file mode 100644
index 4b0001502..000000000
--- a/vendor/k8s.io/client-go/tools/remotecommand/v2.go
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package remotecommand
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "sync"
-
- "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/util/runtime"
-)
-
-// streamProtocolV2 implements version 2 of the streaming protocol for attach
-// and exec. The original streaming protocol was metav1. As a result, this
-// version is referred to as version 2, even though it is the first actual
-// numbered version.
-type streamProtocolV2 struct {
- StreamOptions
-
- errorStream io.Reader
- remoteStdin io.ReadWriteCloser
- remoteStdout io.Reader
- remoteStderr io.Reader
-}
-
-var _ streamProtocolHandler = &streamProtocolV2{}
-
-func newStreamProtocolV2(options StreamOptions) streamProtocolHandler {
- return &streamProtocolV2{
- StreamOptions: options,
- }
-}
-
-func (p *streamProtocolV2) createStreams(conn streamCreator) error {
- var err error
- headers := http.Header{}
-
- // set up error stream
- headers.Set(v1.StreamType, v1.StreamTypeError)
- p.errorStream, err = conn.CreateStream(headers)
- if err != nil {
- return err
- }
-
- // set up stdin stream
- if p.Stdin != nil {
- headers.Set(v1.StreamType, v1.StreamTypeStdin)
- p.remoteStdin, err = conn.CreateStream(headers)
- if err != nil {
- return err
- }
- }
-
- // set up stdout stream
- if p.Stdout != nil {
- headers.Set(v1.StreamType, v1.StreamTypeStdout)
- p.remoteStdout, err = conn.CreateStream(headers)
- if err != nil {
- return err
- }
- }
-
- // set up stderr stream
- if p.Stderr != nil && !p.Tty {
- headers.Set(v1.StreamType, v1.StreamTypeStderr)
- p.remoteStderr, err = conn.CreateStream(headers)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (p *streamProtocolV2) copyStdin() {
- if p.Stdin != nil {
- var once sync.Once
-
- // copy from client's stdin to container's stdin
- go func() {
- defer runtime.HandleCrash()
-
-		// if p.stdin is noninteractive, e.g. `echo abc | kubectl exec -i <pod> -- cat`, make sure
- // we close remoteStdin as soon as the copy from p.stdin to remoteStdin finishes. Otherwise
- // the executed command will remain running.
- defer once.Do(func() { p.remoteStdin.Close() })
-
- if _, err := io.Copy(p.remoteStdin, readerWrapper{p.Stdin}); err != nil {
- runtime.HandleError(err)
- }
- }()
-
- // read from remoteStdin until the stream is closed. this is essential to
- // be able to exit interactive sessions cleanly and not leak goroutines or
- // hang the client's terminal.
- //
- // TODO we aren't using go-dockerclient any more; revisit this to determine if it's still
- // required by engine-api.
- //
- // go-dockerclient's current hijack implementation
- // (https://github.com/fsouza/go-dockerclient/blob/89f3d56d93788dfe85f864a44f85d9738fca0670/client.go#L564)
- // waits for all three streams (stdin/stdout/stderr) to finish copying
- // before returning. When hijack finishes copying stdout/stderr, it calls
- // Close() on its side of remoteStdin, which allows this copy to complete.
- // When that happens, we must Close() on our side of remoteStdin, to
- // allow the copy in hijack to complete, and hijack to return.
- go func() {
- defer runtime.HandleCrash()
- defer once.Do(func() { p.remoteStdin.Close() })
-
- // this "copy" doesn't actually read anything - it's just here to wait for
- // the server to close remoteStdin.
- if _, err := io.Copy(ioutil.Discard, p.remoteStdin); err != nil {
- runtime.HandleError(err)
- }
- }()
- }
-}
-
-func (p *streamProtocolV2) copyStdout(wg *sync.WaitGroup) {
- if p.Stdout == nil {
- return
- }
-
- wg.Add(1)
- go func() {
- defer runtime.HandleCrash()
- defer wg.Done()
-
- if _, err := io.Copy(p.Stdout, p.remoteStdout); err != nil {
- runtime.HandleError(err)
- }
- }()
-}
-
-func (p *streamProtocolV2) copyStderr(wg *sync.WaitGroup) {
- if p.Stderr == nil || p.Tty {
- return
- }
-
- wg.Add(1)
- go func() {
- defer runtime.HandleCrash()
- defer wg.Done()
-
- if _, err := io.Copy(p.Stderr, p.remoteStderr); err != nil {
- runtime.HandleError(err)
- }
- }()
-}
-
-func (p *streamProtocolV2) stream(conn streamCreator) error {
- if err := p.createStreams(conn); err != nil {
- return err
- }
-
- // now that all the streams have been created, proceed with reading & copying
-
- errorChan := watchErrorStream(p.errorStream, &errorDecoderV2{})
-
- p.copyStdin()
-
- var wg sync.WaitGroup
- p.copyStdout(&wg)
- p.copyStderr(&wg)
-
- // we're waiting for stdout/stderr to finish copying
- wg.Wait()
-
- // waits for errorStream to finish reading with an error or nil
- return <-errorChan
-}
-
-// errorDecoderV2 interprets the error channel data as plain text.
-type errorDecoderV2 struct{}
-
-func (d *errorDecoderV2) decode(message []byte) error {
- return fmt.Errorf("error executing remote command: %s", message)
-}
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v3.go b/vendor/k8s.io/client-go/tools/remotecommand/v3.go
deleted file mode 100644
index 846dd24a5..000000000
--- a/vendor/k8s.io/client-go/tools/remotecommand/v3.go
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package remotecommand
-
-import (
- "encoding/json"
- "io"
- "net/http"
- "sync"
-
- "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/util/runtime"
-)
-
-// streamProtocolV3 implements version 3 of the streaming protocol for attach
-// and exec. This version adds support for resizing the container's terminal.
-type streamProtocolV3 struct {
- *streamProtocolV2
-
- resizeStream io.Writer
-}
-
-var _ streamProtocolHandler = &streamProtocolV3{}
-
-func newStreamProtocolV3(options StreamOptions) streamProtocolHandler {
- return &streamProtocolV3{
- streamProtocolV2: newStreamProtocolV2(options).(*streamProtocolV2),
- }
-}
-
-func (p *streamProtocolV3) createStreams(conn streamCreator) error {
- // set up the streams from v2
- if err := p.streamProtocolV2.createStreams(conn); err != nil {
- return err
- }
-
- // set up resize stream
- if p.Tty {
- headers := http.Header{}
- headers.Set(v1.StreamType, v1.StreamTypeResize)
- var err error
- p.resizeStream, err = conn.CreateStream(headers)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (p *streamProtocolV3) handleResizes() {
- if p.resizeStream == nil || p.TerminalSizeQueue == nil {
- return
- }
- go func() {
- defer runtime.HandleCrash()
-
- encoder := json.NewEncoder(p.resizeStream)
- for {
- size := p.TerminalSizeQueue.Next()
- if size == nil {
- return
- }
- if err := encoder.Encode(&size); err != nil {
- runtime.HandleError(err)
- }
- }
- }()
-}
-
-func (p *streamProtocolV3) stream(conn streamCreator) error {
- if err := p.createStreams(conn); err != nil {
- return err
- }
-
- // now that all the streams have been created, proceed with reading & copying
-
- errorChan := watchErrorStream(p.errorStream, &errorDecoderV3{})
-
- p.handleResizes()
-
- p.copyStdin()
-
- var wg sync.WaitGroup
- p.copyStdout(&wg)
- p.copyStderr(&wg)
-
- // we're waiting for stdout/stderr to finish copying
- wg.Wait()
-
- // waits for errorStream to finish reading with an error or nil
- return <-errorChan
-}
-
-type errorDecoderV3 struct {
- errorDecoderV2
-}
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v4.go b/vendor/k8s.io/client-go/tools/remotecommand/v4.go
deleted file mode 100644
index 69ca934a0..000000000
--- a/vendor/k8s.io/client-go/tools/remotecommand/v4.go
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package remotecommand
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "strconv"
- "sync"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/util/remotecommand"
- "k8s.io/client-go/util/exec"
-)
-
-// streamProtocolV4 implements version 4 of the streaming protocol for attach
-// and exec. This version adds support for exit codes on the error stream through
-// the use of metav1.Status instead of plain text messages.
-type streamProtocolV4 struct {
- *streamProtocolV3
-}
-
-var _ streamProtocolHandler = &streamProtocolV4{}
-
-func newStreamProtocolV4(options StreamOptions) streamProtocolHandler {
- return &streamProtocolV4{
- streamProtocolV3: newStreamProtocolV3(options).(*streamProtocolV3),
- }
-}
-
-func (p *streamProtocolV4) createStreams(conn streamCreator) error {
- return p.streamProtocolV3.createStreams(conn)
-}
-
-func (p *streamProtocolV4) handleResizes() {
- p.streamProtocolV3.handleResizes()
-}
-
-func (p *streamProtocolV4) stream(conn streamCreator) error {
- if err := p.createStreams(conn); err != nil {
- return err
- }
-
- // now that all the streams have been created, proceed with reading & copying
-
- errorChan := watchErrorStream(p.errorStream, &errorDecoderV4{})
-
- p.handleResizes()
-
- p.copyStdin()
-
- var wg sync.WaitGroup
- p.copyStdout(&wg)
- p.copyStderr(&wg)
-
- // we're waiting for stdout/stderr to finish copying
- wg.Wait()
-
- // waits for errorStream to finish reading with an error or nil
- return <-errorChan
-}
-
-// errorDecoderV4 interprets the json-marshaled metav1.Status on the error channel
-// and creates an exec.ExitError from it.
-type errorDecoderV4 struct{}
-
-func (d *errorDecoderV4) decode(message []byte) error {
- status := metav1.Status{}
- err := json.Unmarshal(message, &status)
- if err != nil {
- return fmt.Errorf("error stream protocol error: %v in %q", err, string(message))
- }
- switch status.Status {
- case metav1.StatusSuccess:
- return nil
- case metav1.StatusFailure:
- if status.Reason == remotecommand.NonZeroExitCodeReason {
- if status.Details == nil {
- return errors.New("error stream protocol error: details must be set")
- }
- for i := range status.Details.Causes {
- c := &status.Details.Causes[i]
- if c.Type != remotecommand.ExitCodeCauseType {
- continue
- }
-
- rc, err := strconv.ParseUint(c.Message, 10, 8)
- if err != nil {
- return fmt.Errorf("error stream protocol error: invalid exit code value %q", c.Message)
- }
- return exec.CodeExitError{
- Err: fmt.Errorf("command terminated with exit code %d", rc),
- Code: int(rc),
- }
- }
-
- return fmt.Errorf("error stream protocol error: no %s cause given", remotecommand.ExitCodeCauseType)
- }
- default:
- return errors.New("error stream protocol error: unknown error")
- }
-
- return fmt.Errorf(status.Message)
-}
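errorDecoderV4 reports non-zero remote exit codes as exec.CodeExitError values rather than plain strings. Below is a minimal sketch of how a caller of the streaming API might recover that exit code from the returned error; the helper name and the sample error are illustrative, not part of this changeset.

package main

import (
	"errors"
	"fmt"

	utilexec "k8s.io/client-go/util/exec"
)

// exitCodeFromStreamErr maps the error returned by the remotecommand stream
// back to the remote process exit code when one was reported on the error
// channel. The second return value is false for transport-level failures.
func exitCodeFromStreamErr(err error) (int, bool) {
	if err == nil {
		return 0, true
	}
	var codeErr utilexec.CodeExitError
	if errors.As(err, &codeErr) {
		return codeErr.Code, true
	}
	return 0, false
}

func main() {
	// Simulate the value errorDecoderV4 produces for a non-zero exit.
	err := utilexec.CodeExitError{Err: fmt.Errorf("command terminated with exit code 3"), Code: 3}
	if code, ok := exitCodeFromStreamErr(err); ok {
		fmt.Println("remote exit code:", code)
	}
}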
diff --git a/vendor/k8s.io/client-go/transport/OWNERS b/vendor/k8s.io/client-go/transport/OWNERS
deleted file mode 100644
index a52176903..000000000
--- a/vendor/k8s.io/client-go/transport/OWNERS
+++ /dev/null
@@ -1,9 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-reviewers:
-- smarterclayton
-- wojtek-t
-- deads2k
-- liggitt
-- krousey
-- caesarxuchao
diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go
deleted file mode 100644
index 7cffe2a5f..000000000
--- a/vendor/k8s.io/client-go/transport/cache.go
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package transport
-
-import (
- "fmt"
- "net"
- "net/http"
- "sync"
- "time"
-
- utilnet "k8s.io/apimachinery/pkg/util/net"
-)
-
-// tlsTransportCache caches TLS http.RoundTrippers for different configurations.
-// The same RoundTripper will be returned for configs with identical TLS options.
-// If the config has no custom TLS options, http.DefaultTransport is returned.
-type tlsTransportCache struct {
- mu sync.Mutex
- transports map[tlsCacheKey]*http.Transport
-}
-
-const idleConnsPerHost = 25
-
-var tlsCache = &tlsTransportCache{transports: make(map[tlsCacheKey]*http.Transport)}
-
-type tlsCacheKey struct {
- insecure bool
- caData string
- certData string
- keyData string
- getCert string
- serverName string
- dial string
-}
-
-func (t tlsCacheKey) String() string {
- keyText := "<none>"
- if len(t.keyData) > 0 {
- keyText = "<redacted>"
- }
- return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, getCert: %s, serverName:%s, dial:%s", t.insecure, t.caData, t.certData, keyText, t.getCert, t.serverName, t.dial)
-}
-
-func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
- key, err := tlsConfigKey(config)
- if err != nil {
- return nil, err
- }
-
- // Ensure we only create a single transport for the given TLS options
- c.mu.Lock()
- defer c.mu.Unlock()
-
- // See if we already have a custom transport for this config
- if t, ok := c.transports[key]; ok {
- return t, nil
- }
-
- // Get the TLS options for this client config
- tlsConfig, err := TLSConfigFor(config)
- if err != nil {
- return nil, err
- }
- // The options didn't require a custom TLS config
- if tlsConfig == nil && config.Dial == nil {
- return http.DefaultTransport, nil
- }
-
- dial := config.Dial
- if dial == nil {
- dial = (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- }).DialContext
- }
- // Cache a single transport for these options
- c.transports[key] = utilnet.SetTransportDefaults(&http.Transport{
- Proxy: http.ProxyFromEnvironment,
- TLSHandshakeTimeout: 10 * time.Second,
- TLSClientConfig: tlsConfig,
- MaxIdleConnsPerHost: idleConnsPerHost,
- DialContext: dial,
- })
- return c.transports[key], nil
-}
-
-// tlsConfigKey returns a unique key for tls.Config objects returned from TLSConfigFor
-func tlsConfigKey(c *Config) (tlsCacheKey, error) {
- // Make sure ca/key/cert content is loaded
- if err := loadTLSFiles(c); err != nil {
- return tlsCacheKey{}, err
- }
- return tlsCacheKey{
- insecure: c.TLS.Insecure,
- caData: string(c.TLS.CAData),
- certData: string(c.TLS.CertData),
- keyData: string(c.TLS.KeyData),
- getCert: fmt.Sprintf("%p", c.TLS.GetCert),
- serverName: c.TLS.ServerName,
- dial: fmt.Sprintf("%p", c.Dial),
- }, nil
-}
diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go
deleted file mode 100644
index 5de0a2cb1..000000000
--- a/vendor/k8s.io/client-go/transport/config.go
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package transport
-
-import (
- "context"
- "crypto/tls"
- "net"
- "net/http"
-)
-
-// Config holds various options for establishing a transport.
-type Config struct {
- // UserAgent is an optional field that specifies the caller of this
- // request.
- UserAgent string
-
- // The base TLS configuration for this transport.
- TLS TLSConfig
-
- // Username and password for basic authentication
- Username string
- Password string
-
- // Bearer token for authentication
- BearerToken string
-
- // Path to a file containing a BearerToken.
- // If set, the contents are periodically read.
- // The last successfully read value takes precedence over BearerToken.
- BearerTokenFile string
-
- // Impersonate is the config that this Config will impersonate using
- Impersonate ImpersonationConfig
-
- // Transport may be used for custom HTTP behavior. This attribute may
- // not be specified with the TLS client certificate options. Use
- // WrapTransport for most client level operations.
- Transport http.RoundTripper
-
- // WrapTransport will be invoked for custom HTTP behavior after the
- // underlying transport is initialized (either the transport created
- // from TLSClientConfig, Transport, or http.DefaultTransport). The
- // config may layer other RoundTrippers on top of the returned
- // RoundTripper.
- //
- // A future release will change this field to an array. Use config.Wrap()
- // instead of setting this value directly.
- WrapTransport WrapperFunc
-
- // Dial specifies the dial function for creating unencrypted TCP connections.
- Dial func(ctx context.Context, network, address string) (net.Conn, error)
-}
-
-// ImpersonationConfig has all the available impersonation options
-type ImpersonationConfig struct {
- // UserName matches user.Info.GetName()
- UserName string
- // Groups matches user.Info.GetGroups()
- Groups []string
- // Extra matches user.Info.GetExtra()
- Extra map[string][]string
-}
-
-// HasCA returns whether the configuration has a certificate authority or not.
-func (c *Config) HasCA() bool {
- return len(c.TLS.CAData) > 0 || len(c.TLS.CAFile) > 0
-}
-
-// HasBasicAuth returns whether the configuration has basic authentication or not.
-func (c *Config) HasBasicAuth() bool {
- return len(c.Username) != 0
-}
-
-// HasTokenAuth returns whether the configuration has token authentication or not.
-func (c *Config) HasTokenAuth() bool {
- return len(c.BearerToken) != 0 || len(c.BearerTokenFile) != 0
-}
-
-// HasCertAuth returns whether the configuration has certificate authentication or not.
-func (c *Config) HasCertAuth() bool {
- return (len(c.TLS.CertData) != 0 || len(c.TLS.CertFile) != 0) && (len(c.TLS.KeyData) != 0 || len(c.TLS.KeyFile) != 0)
-}
-
-// HasCertCallback returns whether the configuration has a certificate callback or not.
-func (c *Config) HasCertCallback() bool {
- return c.TLS.GetCert != nil
-}
-
-// Wrap adds a transport middleware function that will give the caller
-// an opportunity to wrap the underlying http.RoundTripper prior to the
-// first API call being made. The provided function is invoked after any
-// existing transport wrappers are invoked.
-func (c *Config) Wrap(fn WrapperFunc) {
- c.WrapTransport = Wrappers(c.WrapTransport, fn)
-}
-
-// TLSConfig holds the information needed to set up a TLS transport.
-type TLSConfig struct {
- CAFile string // Path of the PEM-encoded server trusted root certificates.
- CertFile string // Path of the PEM-encoded client certificate.
- KeyFile string // Path of the PEM-encoded client key.
-
- Insecure bool // Server should be accessed without verifying the certificate. For testing only.
- ServerName string // Override for the server name passed to the server for SNI and used to verify certificates.
-
- CAData []byte // Bytes of the PEM-encoded server trusted root certificates. Supersedes CAFile.
- CertData []byte // Bytes of the PEM-encoded client certificate. Supersedes CertFile.
- KeyData []byte // Bytes of the PEM-encoded client key. Supersedes KeyFile.
-
- GetCert func() (*tls.Certificate, error) // Callback that returns a TLS client certificate. CertData, CertFile, KeyData and KeyFile supersede this field.
-}
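The Config above is consumed by transport.New later in this changeset, and Wrap layers additional RoundTrippers on top of whatever base transport is built. A minimal sketch follows, with placeholder credentials and an illustrative wrapper type; it is not taken from the removed code.

package main

import (
	"fmt"
	"net/http"

	"k8s.io/client-go/transport"
)

// headerSettingRT is a hypothetical wrapper that stamps an extra header on
// every request before delegating to the underlying RoundTripper.
type headerSettingRT struct {
	rt http.RoundTripper
}

func (h *headerSettingRT) RoundTrip(req *http.Request) (*http.Response, error) {
	req = req.Clone(req.Context())
	req.Header.Set("X-Example-Client", "demo") // illustrative header name
	return h.rt.RoundTrip(req)
}

func main() {
	cfg := &transport.Config{
		UserAgent:   "example-client/0.1",
		BearerToken: "example-token", // placeholder credential
	}
	// Wrap layers the custom RoundTripper on top of the transport New builds.
	cfg.Wrap(func(rt http.RoundTripper) http.RoundTripper {
		return &headerSettingRT{rt: rt}
	})

	rt, err := transport.New(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("built round tripper: %T\n", rt)
}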
diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go
deleted file mode 100644
index 117a9c8c4..000000000
--- a/vendor/k8s.io/client-go/transport/round_trippers.go
+++ /dev/null
@@ -1,564 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package transport
-
-import (
- "fmt"
- "net/http"
- "strings"
- "time"
-
- "golang.org/x/oauth2"
- "k8s.io/klog"
-
- utilnet "k8s.io/apimachinery/pkg/util/net"
-)
-
-// HTTPWrappersForConfig wraps a round tripper with any relevant layered
-// behavior from the config. Exposed to allow more clients that need HTTP-like
-// behavior but then must hijack the underlying connection (like WebSocket or
-// HTTP2 clients). Pure HTTP clients should use the RoundTripper returned from
-// New.
-func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) {
- if config.WrapTransport != nil {
- rt = config.WrapTransport(rt)
- }
-
- rt = DebugWrappers(rt)
-
- // Set authentication wrappers
- switch {
- case config.HasBasicAuth() && config.HasTokenAuth():
- return nil, fmt.Errorf("username/password or bearer token may be set, but not both")
- case config.HasTokenAuth():
- var err error
- rt, err = NewBearerAuthWithRefreshRoundTripper(config.BearerToken, config.BearerTokenFile, rt)
- if err != nil {
- return nil, err
- }
- case config.HasBasicAuth():
- rt = NewBasicAuthRoundTripper(config.Username, config.Password, rt)
- }
- if len(config.UserAgent) > 0 {
- rt = NewUserAgentRoundTripper(config.UserAgent, rt)
- }
- if len(config.Impersonate.UserName) > 0 ||
- len(config.Impersonate.Groups) > 0 ||
- len(config.Impersonate.Extra) > 0 {
- rt = NewImpersonatingRoundTripper(config.Impersonate, rt)
- }
- return rt, nil
-}
-
-// DebugWrappers wraps a round tripper and logs based on the current log level.
-func DebugWrappers(rt http.RoundTripper) http.RoundTripper {
- switch {
- case bool(klog.V(9)):
- rt = newDebuggingRoundTripper(rt, debugCurlCommand, debugURLTiming, debugResponseHeaders)
- case bool(klog.V(8)):
- rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus, debugResponseHeaders)
- case bool(klog.V(7)):
- rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus)
- case bool(klog.V(6)):
- rt = newDebuggingRoundTripper(rt, debugURLTiming)
- }
-
- return rt
-}
-
-type requestCanceler interface {
- CancelRequest(*http.Request)
-}
-
-type authProxyRoundTripper struct {
- username string
- groups []string
- extra map[string][]string
-
- rt http.RoundTripper
-}
-
-// NewAuthProxyRoundTripper provides a roundtripper which will add auth proxy fields to requests for
-// authentication terminating proxy cases
-// assuming you pull the user from the context:
-// username is the user.Info.GetName() of the user
-// groups is the user.Info.GetGroups() of the user
-// extra is the user.Info.GetExtra() of the user
-// extra can contain any additional information that the authenticator
-// thought was interesting, for example authorization scopes.
-// In order to faithfully round-trip through an impersonation flow, these keys
-// MUST be lowercase.
-func NewAuthProxyRoundTripper(username string, groups []string, extra map[string][]string, rt http.RoundTripper) http.RoundTripper {
- return &authProxyRoundTripper{
- username: username,
- groups: groups,
- extra: extra,
- rt: rt,
- }
-}
-
-func (rt *authProxyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
- req = utilnet.CloneRequest(req)
- SetAuthProxyHeaders(req, rt.username, rt.groups, rt.extra)
-
- return rt.rt.RoundTrip(req)
-}
-
-// SetAuthProxyHeaders stomps the auth proxy header fields. It mutates its argument.
-func SetAuthProxyHeaders(req *http.Request, username string, groups []string, extra map[string][]string) {
- req.Header.Del("X-Remote-User")
- req.Header.Del("X-Remote-Group")
- for key := range req.Header {
- if strings.HasPrefix(strings.ToLower(key), strings.ToLower("X-Remote-Extra-")) {
- req.Header.Del(key)
- }
- }
-
- req.Header.Set("X-Remote-User", username)
- for _, group := range groups {
- req.Header.Add("X-Remote-Group", group)
- }
- for key, values := range extra {
- for _, value := range values {
- req.Header.Add("X-Remote-Extra-"+headerKeyEscape(key), value)
- }
- }
-}
-
-func (rt *authProxyRoundTripper) CancelRequest(req *http.Request) {
- if canceler, ok := rt.rt.(requestCanceler); ok {
- canceler.CancelRequest(req)
- } else {
- klog.Errorf("CancelRequest not implemented by %T", rt.rt)
- }
-}
-
-func (rt *authProxyRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt }
-
-type userAgentRoundTripper struct {
- agent string
- rt http.RoundTripper
-}
-
-func NewUserAgentRoundTripper(agent string, rt http.RoundTripper) http.RoundTripper {
- return &userAgentRoundTripper{agent, rt}
-}
-
-func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
- if len(req.Header.Get("User-Agent")) != 0 {
- return rt.rt.RoundTrip(req)
- }
- req = utilnet.CloneRequest(req)
- req.Header.Set("User-Agent", rt.agent)
- return rt.rt.RoundTrip(req)
-}
-
-func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) {
- if canceler, ok := rt.rt.(requestCanceler); ok {
- canceler.CancelRequest(req)
- } else {
- klog.Errorf("CancelRequest not implemented by %T", rt.rt)
- }
-}
-
-func (rt *userAgentRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt }
-
-type basicAuthRoundTripper struct {
- username string
- password string
- rt http.RoundTripper
-}
-
-// NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a
-// request unless it has already been set.
-func NewBasicAuthRoundTripper(username, password string, rt http.RoundTripper) http.RoundTripper {
- return &basicAuthRoundTripper{username, password, rt}
-}
-
-func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
- if len(req.Header.Get("Authorization")) != 0 {
- return rt.rt.RoundTrip(req)
- }
- req = utilnet.CloneRequest(req)
- req.SetBasicAuth(rt.username, rt.password)
- return rt.rt.RoundTrip(req)
-}
-
-func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) {
- if canceler, ok := rt.rt.(requestCanceler); ok {
- canceler.CancelRequest(req)
- } else {
- klog.Errorf("CancelRequest not implemented by %T", rt.rt)
- }
-}
-
-func (rt *basicAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt }
-
-// These correspond to the headers used in pkg/apis/authentication. We don't want the package dependency,
-// but you must not change the values.
-const (
- // ImpersonateUserHeader is used to impersonate a particular user during an API server request
- ImpersonateUserHeader = "Impersonate-User"
-
- // ImpersonateGroupHeader is used to impersonate a particular group during an API server request.
- // It can be repeated multiple times for multiple groups.
- ImpersonateGroupHeader = "Impersonate-Group"
-
- // ImpersonateUserExtraHeaderPrefix is a prefix for a header used to impersonate an entry in the
- // extra map[string][]string for user.Info. The key for the `extra` map is suffix.
- // The same key can be repeated multiple times to have multiple elements in the slice under a single key.
- // For instance:
- // Impersonate-Extra-Foo: one
- // Impersonate-Extra-Foo: two
- // results in extra["Foo"] = []string{"one", "two"}
- ImpersonateUserExtraHeaderPrefix = "Impersonate-Extra-"
-)
-
-type impersonatingRoundTripper struct {
- impersonate ImpersonationConfig
- delegate http.RoundTripper
-}
-
-// NewImpersonatingRoundTripper will add an Act-As header to a request unless it has already been set.
-func NewImpersonatingRoundTripper(impersonate ImpersonationConfig, delegate http.RoundTripper) http.RoundTripper {
- return &impersonatingRoundTripper{impersonate, delegate}
-}
-
-func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
- // use the user header as marker for the rest.
- if len(req.Header.Get(ImpersonateUserHeader)) != 0 {
- return rt.delegate.RoundTrip(req)
- }
- req = utilnet.CloneRequest(req)
- req.Header.Set(ImpersonateUserHeader, rt.impersonate.UserName)
-
- for _, group := range rt.impersonate.Groups {
- req.Header.Add(ImpersonateGroupHeader, group)
- }
- for k, vv := range rt.impersonate.Extra {
- for _, v := range vv {
- req.Header.Add(ImpersonateUserExtraHeaderPrefix+headerKeyEscape(k), v)
- }
- }
-
- return rt.delegate.RoundTrip(req)
-}
-
-func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) {
- if canceler, ok := rt.delegate.(requestCanceler); ok {
- canceler.CancelRequest(req)
- } else {
- klog.Errorf("CancelRequest not implemented by %T", rt.delegate)
- }
-}
-
-func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.delegate }
-
-type bearerAuthRoundTripper struct {
- bearer string
- source oauth2.TokenSource
- rt http.RoundTripper
-}
-
-// NewBearerAuthRoundTripper adds the provided bearer token to a request
-// unless the authorization header has already been set.
-func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper {
- return &bearerAuthRoundTripper{bearer, nil, rt}
-}
-
-// NewBearerAuthWithRefreshRoundTripper adds the provided bearer token to a request
-// unless the authorization header has already been set.
-// If tokenFile is non-empty, it is periodically read,
-// and the last successfully read content is used as the bearer token.
-// If tokenFile is non-empty and bearer is empty, the tokenFile is read
-// immediately to populate the initial bearer token.
-func NewBearerAuthWithRefreshRoundTripper(bearer string, tokenFile string, rt http.RoundTripper) (http.RoundTripper, error) {
- if len(tokenFile) == 0 {
- return &bearerAuthRoundTripper{bearer, nil, rt}, nil
- }
- source := NewCachedFileTokenSource(tokenFile)
- if len(bearer) == 0 {
- token, err := source.Token()
- if err != nil {
- return nil, err
- }
- bearer = token.AccessToken
- }
- return &bearerAuthRoundTripper{bearer, source, rt}, nil
-}
-
-func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
- if len(req.Header.Get("Authorization")) != 0 {
- return rt.rt.RoundTrip(req)
- }
-
- req = utilnet.CloneRequest(req)
- token := rt.bearer
- if rt.source != nil {
- if refreshedToken, err := rt.source.Token(); err == nil {
- token = refreshedToken.AccessToken
- }
- }
- req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
- return rt.rt.RoundTrip(req)
-}
-
-func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) {
- if canceler, ok := rt.rt.(requestCanceler); ok {
- canceler.CancelRequest(req)
- } else {
- klog.Errorf("CancelRequest not implemented by %T", rt.rt)
- }
-}
-
-func (rt *bearerAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt }
-
-// requestInfo keeps track of information about a request/response combination
-type requestInfo struct {
- RequestHeaders http.Header
- RequestVerb string
- RequestURL string
-
- ResponseStatus string
- ResponseHeaders http.Header
- ResponseErr error
-
- Duration time.Duration
-}
-
-// newRequestInfo creates a new RequestInfo based on an http request
-func newRequestInfo(req *http.Request) *requestInfo {
- return &requestInfo{
- RequestURL: req.URL.String(),
- RequestVerb: req.Method,
- RequestHeaders: req.Header,
- }
-}
-
-// complete adds information about the response to the requestInfo
-func (r *requestInfo) complete(response *http.Response, err error) {
- if err != nil {
- r.ResponseErr = err
- return
- }
- r.ResponseStatus = response.Status
- r.ResponseHeaders = response.Header
-}
-
-// toCurl returns a string that can be run as a command in a terminal (minus the body)
-func (r *requestInfo) toCurl() string {
- headers := ""
- for key, values := range r.RequestHeaders {
- for _, value := range values {
- headers += fmt.Sprintf(` -H %q`, fmt.Sprintf("%s: %s", key, value))
- }
- }
-
- return fmt.Sprintf("curl -k -v -X%s %s '%s'", r.RequestVerb, headers, r.RequestURL)
-}
-
-// debuggingRoundTripper will display information about the requests passing
-// through it based on what is configured
-type debuggingRoundTripper struct {
- delegatedRoundTripper http.RoundTripper
-
- levels map[debugLevel]bool
-}
-
-type debugLevel int
-
-const (
- debugJustURL debugLevel = iota
- debugURLTiming
- debugCurlCommand
- debugRequestHeaders
- debugResponseStatus
- debugResponseHeaders
-)
-
-func newDebuggingRoundTripper(rt http.RoundTripper, levels ...debugLevel) *debuggingRoundTripper {
- drt := &debuggingRoundTripper{
- delegatedRoundTripper: rt,
- levels: make(map[debugLevel]bool, len(levels)),
- }
- for _, v := range levels {
- drt.levels[v] = true
- }
- return drt
-}
-
-func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) {
- if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok {
- canceler.CancelRequest(req)
- } else {
- klog.Errorf("CancelRequest not implemented by %T", rt.delegatedRoundTripper)
- }
-}
-
-func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
- reqInfo := newRequestInfo(req)
-
- if rt.levels[debugJustURL] {
- klog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL)
- }
- if rt.levels[debugCurlCommand] {
- klog.Infof("%s", reqInfo.toCurl())
-
- }
- if rt.levels[debugRequestHeaders] {
- klog.Infof("Request Headers:")
- for key, values := range reqInfo.RequestHeaders {
- for _, value := range values {
- klog.Infof(" %s: %s", key, value)
- }
- }
- }
-
- startTime := time.Now()
- response, err := rt.delegatedRoundTripper.RoundTrip(req)
- reqInfo.Duration = time.Since(startTime)
-
- reqInfo.complete(response, err)
-
- if rt.levels[debugURLTiming] {
- klog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond))
- }
- if rt.levels[debugResponseStatus] {
- klog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond))
- }
- if rt.levels[debugResponseHeaders] {
- klog.Infof("Response Headers:")
- for key, values := range reqInfo.ResponseHeaders {
- for _, value := range values {
- klog.Infof(" %s: %s", key, value)
- }
- }
- }
-
- return response, err
-}
-
-func (rt *debuggingRoundTripper) WrappedRoundTripper() http.RoundTripper {
- return rt.delegatedRoundTripper
-}
-
-func legalHeaderByte(b byte) bool {
- return int(b) < len(legalHeaderKeyBytes) && legalHeaderKeyBytes[b]
-}
-
-func shouldEscape(b byte) bool {
- // url.PathUnescape() returns an error if any '%' is not followed by two
- // hexadecimal digits, so we'll intentionally encode it.
- return !legalHeaderByte(b) || b == '%'
-}
-
-func headerKeyEscape(key string) string {
- buf := strings.Builder{}
- for i := 0; i < len(key); i++ {
- b := key[i]
- if shouldEscape(b) {
- // %-encode bytes that should be escaped:
- // https://tools.ietf.org/html/rfc3986#section-2.1
- fmt.Fprintf(&buf, "%%%02X", b)
- continue
- }
- buf.WriteByte(b)
- }
- return buf.String()
-}
-
-// legalHeaderKeyBytes was copied from net/http/lex.go's isTokenTable.
-// See https://httpwg.github.io/specs/rfc7230.html#rule.token.separators
-var legalHeaderKeyBytes = [127]bool{
- '%': true,
- '!': true,
- '#': true,
- '$': true,
- '&': true,
- '\'': true,
- '*': true,
- '+': true,
- '-': true,
- '.': true,
- '0': true,
- '1': true,
- '2': true,
- '3': true,
- '4': true,
- '5': true,
- '6': true,
- '7': true,
- '8': true,
- '9': true,
- 'A': true,
- 'B': true,
- 'C': true,
- 'D': true,
- 'E': true,
- 'F': true,
- 'G': true,
- 'H': true,
- 'I': true,
- 'J': true,
- 'K': true,
- 'L': true,
- 'M': true,
- 'N': true,
- 'O': true,
- 'P': true,
- 'Q': true,
- 'R': true,
- 'S': true,
- 'T': true,
- 'U': true,
- 'W': true,
- 'V': true,
- 'X': true,
- 'Y': true,
- 'Z': true,
- '^': true,
- '_': true,
- '`': true,
- 'a': true,
- 'b': true,
- 'c': true,
- 'd': true,
- 'e': true,
- 'f': true,
- 'g': true,
- 'h': true,
- 'i': true,
- 'j': true,
- 'k': true,
- 'l': true,
- 'm': true,
- 'n': true,
- 'o': true,
- 'p': true,
- 'q': true,
- 'r': true,
- 's': true,
- 't': true,
- 'u': true,
- 'v': true,
- 'w': true,
- 'x': true,
- 'y': true,
- 'z': true,
- '|': true,
- '~': true,
-}
diff --git a/vendor/k8s.io/client-go/transport/spdy/spdy.go b/vendor/k8s.io/client-go/transport/spdy/spdy.go
deleted file mode 100644
index 53cc7ee18..000000000
--- a/vendor/k8s.io/client-go/transport/spdy/spdy.go
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spdy
-
-import (
- "fmt"
- "net/http"
- "net/url"
-
- "k8s.io/apimachinery/pkg/util/httpstream"
- "k8s.io/apimachinery/pkg/util/httpstream/spdy"
- restclient "k8s.io/client-go/rest"
-)
-
-// Upgrader validates a response from the server after a SPDY upgrade.
-type Upgrader interface {
- // NewConnection validates the response and creates a new Connection.
- NewConnection(resp *http.Response) (httpstream.Connection, error)
-}
-
-// RoundTripperFor returns a round tripper and upgrader to use with SPDY.
-func RoundTripperFor(config *restclient.Config) (http.RoundTripper, Upgrader, error) {
- tlsConfig, err := restclient.TLSConfigFor(config)
- if err != nil {
- return nil, nil, err
- }
- upgradeRoundTripper := spdy.NewRoundTripper(tlsConfig, true, false)
- wrapper, err := restclient.HTTPWrappersForConfig(config, upgradeRoundTripper)
- if err != nil {
- return nil, nil, err
- }
- return wrapper, upgradeRoundTripper, nil
-}
-
-// dialer implements the httpstream.Dialer interface.
-type dialer struct {
- client *http.Client
- upgrader Upgrader
- method string
- url *url.URL
-}
-
-var _ httpstream.Dialer = &dialer{}
-
-// NewDialer will create a dialer that connects to the provided URL and upgrades the connection to SPDY.
-func NewDialer(upgrader Upgrader, client *http.Client, method string, url *url.URL) httpstream.Dialer {
- return &dialer{
- client: client,
- upgrader: upgrader,
- method: method,
- url: url,
- }
-}
-
-func (d *dialer) Dial(protocols ...string) (httpstream.Connection, string, error) {
- req, err := http.NewRequest(d.method, d.url.String(), nil)
- if err != nil {
- return nil, "", fmt.Errorf("error creating request: %v", err)
- }
- return Negotiate(d.upgrader, d.client, req, protocols...)
-}
-
-// Negotiate opens a connection to a remote server and attempts to negotiate
-// a SPDY connection. Upon success, it returns the connection and the protocol selected by
-// the server. The client transport must use the upgradeRoundTripper - see RoundTripperFor.
-func Negotiate(upgrader Upgrader, client *http.Client, req *http.Request, protocols ...string) (httpstream.Connection, string, error) {
- for i := range protocols {
- req.Header.Add(httpstream.HeaderProtocolVersion, protocols[i])
- }
- resp, err := client.Do(req)
- if err != nil {
- return nil, "", fmt.Errorf("error sending request: %v", err)
- }
- defer resp.Body.Close()
- conn, err := upgrader.NewConnection(resp)
- if err != nil {
- return nil, "", err
- }
- return conn, resp.Header.Get(httpstream.HeaderProtocolVersion), nil
-}
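RoundTripperFor and NewDialer are the building blocks the remotecommand package uses to open exec/attach streams. A hedged sketch of wiring them together; the host, URL path, and protocol names are illustrative and no real API server is contacted.

package main

import (
	"fmt"
	"net/http"
	"net/url"

	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/transport/spdy"
)

func main() {
	// Minimal rest config; in practice this comes from kubeconfig or in-cluster config.
	config := &restclient.Config{Host: "https://example.invalid:6443"}

	rt, upgrader, err := spdy.RoundTripperFor(config)
	if err != nil {
		panic(err)
	}

	execURL, _ := url.Parse(config.Host + "/api/v1/namespaces/default/pods/demo/exec")
	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: rt}, http.MethodPost, execURL)

	// Dial negotiates one of the offered subprotocols; against a real API server
	// these would be the remotecommand stream protocol versions.
	conn, protocol, err := dialer.Dial("v4.channel.k8s.io", "v2.channel.k8s.io")
	if err != nil {
		fmt.Println("dial failed (expected without a real server):", err)
		return
	}
	defer conn.Close()
	fmt.Println("negotiated protocol:", protocol)
}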
diff --git a/vendor/k8s.io/client-go/transport/token_source.go b/vendor/k8s.io/client-go/transport/token_source.go
deleted file mode 100644
index b8cadd382..000000000
--- a/vendor/k8s.io/client-go/transport/token_source.go
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package transport
-
-import (
- "fmt"
- "io/ioutil"
- "net/http"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/oauth2"
- "k8s.io/klog"
-)
-
-// TokenSourceWrapTransport returns a WrapTransport that injects bearer token
-// authentication from an oauth2.TokenSource.
-func TokenSourceWrapTransport(ts oauth2.TokenSource) func(http.RoundTripper) http.RoundTripper {
- return func(rt http.RoundTripper) http.RoundTripper {
- return &tokenSourceTransport{
- base: rt,
- ort: &oauth2.Transport{
- Source: ts,
- Base: rt,
- },
- }
- }
-}
-
-// NewCachedFileTokenSource returns an oauth2.TokenSource that reads a token from
-// a file at a specified path and periodically reloads it.
-func NewCachedFileTokenSource(path string) oauth2.TokenSource {
- return &cachingTokenSource{
- now: time.Now,
- leeway: 10 * time.Second,
- base: &fileTokenSource{
- path: path,
- // This period was picked because it is half of the duration between when the kubelet
- // refreshes a projected service account token and when the original token expires.
- // Default token lifetime is 10 minutes, and the kubelet starts refreshing at 80% of lifetime.
- // This should induce re-reading at a frequency that works with the token volume source.
- period: time.Minute,
- },
- }
-}
-
-// NewCachedTokenSource returns an oauth2.TokenSource that caches the token
-// obtained from the designated TokenSource ts.
-func NewCachedTokenSource(ts oauth2.TokenSource) oauth2.TokenSource {
- return &cachingTokenSource{
- now: time.Now,
- base: ts,
- }
-}
-
-type tokenSourceTransport struct {
- base http.RoundTripper
- ort http.RoundTripper
-}
-
-func (tst *tokenSourceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- // This is to allow --token to override other bearer token providers.
- if req.Header.Get("Authorization") != "" {
- return tst.base.RoundTrip(req)
- }
- return tst.ort.RoundTrip(req)
-}
-
-type fileTokenSource struct {
- path string
- period time.Duration
-}
-
-var _ = oauth2.TokenSource(&fileTokenSource{})
-
-func (ts *fileTokenSource) Token() (*oauth2.Token, error) {
- tokb, err := ioutil.ReadFile(ts.path)
- if err != nil {
- return nil, fmt.Errorf("failed to read token file %q: %v", ts.path, err)
- }
- tok := strings.TrimSpace(string(tokb))
- if len(tok) == 0 {
- return nil, fmt.Errorf("read empty token from file %q", ts.path)
- }
-
- return &oauth2.Token{
- AccessToken: tok,
- Expiry: time.Now().Add(ts.period),
- }, nil
-}
-
-type cachingTokenSource struct {
- base oauth2.TokenSource
- leeway time.Duration
-
- sync.RWMutex
- tok *oauth2.Token
-
- // for testing
- now func() time.Time
-}
-
-var _ = oauth2.TokenSource(&cachingTokenSource{})
-
-func (ts *cachingTokenSource) Token() (*oauth2.Token, error) {
- now := ts.now()
- // fast path
- ts.RLock()
- tok := ts.tok
- ts.RUnlock()
-
- if tok != nil && tok.Expiry.Add(-1*ts.leeway).After(now) {
- return tok, nil
- }
-
- // slow path
- ts.Lock()
- defer ts.Unlock()
- if tok := ts.tok; tok != nil && tok.Expiry.Add(-1*ts.leeway).After(now) {
- return tok, nil
- }
-
- tok, err := ts.base.Token()
- if err != nil {
- if ts.tok == nil {
- return nil, err
- }
- klog.Errorf("Unable to rotate token: %v", err)
- return ts.tok, nil
- }
-
- ts.tok = tok
- return tok, nil
-}
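NewCachedFileTokenSource and TokenSourceWrapTransport are typically combined so that each request carries the most recently read token. A minimal sketch follows; the token path is an assumption (a projected service-account token is the usual case) and the endpoint is a placeholder.

package main

import (
	"fmt"
	"net/http"

	"k8s.io/client-go/transport"
)

func main() {
	// Assumed location of a periodically refreshed bearer token.
	ts := transport.NewCachedFileTokenSource("/var/run/secrets/kubernetes.io/serviceaccount/token")

	// Wrap the default transport so each outgoing request is authenticated
	// with the current token from the file-backed source.
	wrap := transport.TokenSourceWrapTransport(ts)
	client := &http.Client{Transport: wrap(http.DefaultTransport)}

	resp, err := client.Get("https://example.invalid/apis")
	if err != nil {
		fmt.Println("request failed (expected without a real endpoint):", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}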
diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go
deleted file mode 100644
index 2a145c971..000000000
--- a/vendor/k8s.io/client-go/transport/transport.go
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package transport
-
-import (
- "context"
- "crypto/tls"
- "crypto/x509"
- "fmt"
- "io/ioutil"
- "net/http"
-)
-
-// New returns an http.RoundTripper that will provide the authentication
-// or transport level security defined by the provided Config.
-func New(config *Config) (http.RoundTripper, error) {
- // Set transport level security
- if config.Transport != nil && (config.HasCA() || config.HasCertAuth() || config.HasCertCallback() || config.TLS.Insecure) {
- return nil, fmt.Errorf("using a custom transport with TLS certificate options or the insecure flag is not allowed")
- }
-
- var (
- rt http.RoundTripper
- err error
- )
-
- if config.Transport != nil {
- rt = config.Transport
- } else {
- rt, err = tlsCache.get(config)
- if err != nil {
- return nil, err
- }
- }
-
- return HTTPWrappersForConfig(config, rt)
-}
-
-// TLSConfigFor returns a tls.Config that will provide the transport level security defined
-// by the provided Config. Will return nil if no transport level security is requested.
-func TLSConfigFor(c *Config) (*tls.Config, error) {
- if !(c.HasCA() || c.HasCertAuth() || c.HasCertCallback() || c.TLS.Insecure || len(c.TLS.ServerName) > 0) {
- return nil, nil
- }
- if c.HasCA() && c.TLS.Insecure {
- return nil, fmt.Errorf("specifying a root certificates file with the insecure flag is not allowed")
- }
- if err := loadTLSFiles(c); err != nil {
- return nil, err
- }
-
- tlsConfig := &tls.Config{
- // Can't use SSLv3 because of POODLE and BEAST
- // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
- // Can't use TLSv1.1 because of RC4 cipher usage
- MinVersion: tls.VersionTLS12,
- InsecureSkipVerify: c.TLS.Insecure,
- ServerName: c.TLS.ServerName,
- }
-
- if c.HasCA() {
- tlsConfig.RootCAs = rootCertPool(c.TLS.CAData)
- }
-
- var staticCert *tls.Certificate
- if c.HasCertAuth() {
- // If key/cert were provided, verify them before setting up
- // tlsConfig.GetClientCertificate.
- cert, err := tls.X509KeyPair(c.TLS.CertData, c.TLS.KeyData)
- if err != nil {
- return nil, err
- }
- staticCert = &cert
- }
-
- if c.HasCertAuth() || c.HasCertCallback() {
- tlsConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
- // Note: static key/cert data always take precedence over cert
- // callback.
- if staticCert != nil {
- return staticCert, nil
- }
- if c.HasCertCallback() {
- cert, err := c.TLS.GetCert()
- if err != nil {
- return nil, err
- }
- // GetCert may return empty value, meaning no cert.
- if cert != nil {
- return cert, nil
- }
- }
-
- // Both c.TLS.CertData/KeyData were unset and GetCert didn't return
- // anything. Return an empty tls.Certificate, no client cert will
- // be sent to the server.
- return &tls.Certificate{}, nil
- }
- }
-
- return tlsConfig, nil
-}
-
-// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData,
-// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are
-// either populated or were empty to start.
-func loadTLSFiles(c *Config) error {
- var err error
- c.TLS.CAData, err = dataFromSliceOrFile(c.TLS.CAData, c.TLS.CAFile)
- if err != nil {
- return err
- }
-
- c.TLS.CertData, err = dataFromSliceOrFile(c.TLS.CertData, c.TLS.CertFile)
- if err != nil {
- return err
- }
-
- c.TLS.KeyData, err = dataFromSliceOrFile(c.TLS.KeyData, c.TLS.KeyFile)
- if err != nil {
- return err
- }
- return nil
-}
-
-// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file,
-// or an error if an error occurred reading the file
-func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
- if len(data) > 0 {
- return data, nil
- }
- if len(file) > 0 {
- fileData, err := ioutil.ReadFile(file)
- if err != nil {
- return []byte{}, err
- }
- return fileData, nil
- }
- return nil, nil
-}
-
-// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs".
-// When caData is not empty, it will be the ONLY information used in the CertPool.
-func rootCertPool(caData []byte) *x509.CertPool {
- // What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go
- // code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values
- // It doesn't allow trusting either/or, but hopefully that won't be an issue
- if len(caData) == 0 {
- return nil
- }
-
- // if we have caData, use it
- certPool := x509.NewCertPool()
- certPool.AppendCertsFromPEM(caData)
- return certPool
-}
-
-// WrapperFunc wraps an http.RoundTripper when a new transport
-// is created for a client, allowing per connection behavior
-// to be injected.
-type WrapperFunc func(rt http.RoundTripper) http.RoundTripper
-
-// Wrappers accepts any number of wrappers and returns a wrapper
-// function that is the equivalent of calling each of them in order. Nil
-// values are ignored, which makes this function convenient for incrementally
-// wrapping a function.
-func Wrappers(fns ...WrapperFunc) WrapperFunc {
- if len(fns) == 0 {
- return nil
- }
- // optimize the common case of wrapping a possibly nil transport wrapper
- // with an additional wrapper
- if len(fns) == 2 && fns[0] == nil {
- return fns[1]
- }
- return func(rt http.RoundTripper) http.RoundTripper {
- base := rt
- for _, fn := range fns {
- if fn != nil {
- base = fn(base)
- }
- }
- return base
- }
-}
-
-// ContextCanceller prevents new requests after the provided context is finished.
-// err is returned when the context is closed, allowing the caller to provide a context
-// appropriate error.
-func ContextCanceller(ctx context.Context, err error) WrapperFunc {
- return func(rt http.RoundTripper) http.RoundTripper {
- return &contextCanceller{
- ctx: ctx,
- rt: rt,
- err: err,
- }
- }
-}
-
-type contextCanceller struct {
- ctx context.Context
- rt http.RoundTripper
- err error
-}
-
-func (b *contextCanceller) RoundTrip(req *http.Request) (*http.Response, error) {
- select {
- case <-b.ctx.Done():
- return nil, b.err
- default:
- return b.rt.RoundTrip(req)
- }
-}
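Wrappers composes WrapperFuncs so that earlier functions end up closest to the base transport, which means the last wrapper's RoundTrip runs first. A small illustrative sketch that records the call order; the wrapper type is invented for the example and the request is expected to fail.

package main

import (
	"fmt"
	"net/http"

	"k8s.io/client-go/transport"
)

// labelRT records the order in which wrapped RoundTrippers are invoked by
// appending its label to a shared trace before delegating.
type labelRT struct {
	label string
	next  http.RoundTripper
	trace *[]string
}

func (l *labelRT) RoundTrip(req *http.Request) (*http.Response, error) {
	*l.trace = append(*l.trace, l.label)
	return l.next.RoundTrip(req)
}

func main() {
	trace := []string{}
	wrap := transport.Wrappers(
		func(rt http.RoundTripper) http.RoundTripper { return &labelRT{label: "inner", next: rt, trace: &trace} },
		func(rt http.RoundTripper) http.RoundTripper { return &labelRT{label: "outer", next: rt, trace: &trace} },
	)

	rt := wrap(http.DefaultTransport)
	req, _ := http.NewRequest(http.MethodGet, "https://example.invalid/", nil)
	_, _ = rt.RoundTrip(req) // the request itself fails; only the trace matters here
	fmt.Println("wrapper call order:", trace) // prints: [outer inner]
}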
diff --git a/vendor/k8s.io/client-go/util/cert/OWNERS b/vendor/k8s.io/client-go/util/cert/OWNERS
deleted file mode 100644
index 3cf036438..000000000
--- a/vendor/k8s.io/client-go/util/cert/OWNERS
+++ /dev/null
@@ -1,9 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-- sig-auth-certificates-approvers
-reviewers:
-- sig-auth-certificates-reviewers
-labels:
-- sig/auth
-
diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go
deleted file mode 100644
index 9fd097af5..000000000
--- a/vendor/k8s.io/client-go/util/cert/cert.go
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cert
-
-import (
- "bytes"
- "crypto"
- cryptorand "crypto/rand"
- "crypto/rsa"
- "crypto/x509"
- "crypto/x509/pkix"
- "encoding/pem"
- "fmt"
- "io/ioutil"
- "math/big"
- "net"
- "path"
- "strings"
- "time"
-
- "k8s.io/client-go/util/keyutil"
-)
-
-const duration365d = time.Hour * 24 * 365
-
-// Config contains the basic fields required for creating a certificate
-type Config struct {
- CommonName string
- Organization []string
- AltNames AltNames
- Usages []x509.ExtKeyUsage
-}
-
-// AltNames contains the domain names and IP addresses that will be added
-// to the API Server's x509 certificate SubAltNames field. The values will
-// be passed directly to the x509.Certificate object.
-type AltNames struct {
- DNSNames []string
- IPs []net.IP
-}
-
-// NewSelfSignedCACert creates a CA certificate
-func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) {
- now := time.Now()
- tmpl := x509.Certificate{
- SerialNumber: new(big.Int).SetInt64(0),
- Subject: pkix.Name{
- CommonName: cfg.CommonName,
- Organization: cfg.Organization,
- },
- NotBefore: now.UTC(),
- NotAfter: now.Add(duration365d * 10).UTC(),
- KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
- BasicConstraintsValid: true,
- IsCA: true,
- }
-
- certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key)
- if err != nil {
- return nil, err
- }
- return x509.ParseCertificate(certDERBytes)
-}
-
-// GenerateSelfSignedCertKey creates a self-signed certificate and key for the given host.
-// Host may be an IP or a DNS name.
-// You may also specify additional subject alt names (either IP or DNS names) for the certificate.
-func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS []string) ([]byte, []byte, error) {
- return GenerateSelfSignedCertKeyWithFixtures(host, alternateIPs, alternateDNS, "")
-}
-
-// GenerateSelfSignedCertKeyWithFixtures creates a self-signed certificate and key for the given host.
-// Host may be an IP or a DNS name. You may also specify additional subject alt names (either ip or dns names)
-// for the certificate.
-//
-// If fixtureDirectory is non-empty, it is a directory path which can contain pre-generated certs. The format is:
-// <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.crt
-// <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.key
-// Certs/keys not existing in that directory are created.
-func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, alternateDNS []string, fixtureDirectory string) ([]byte, []byte, error) {
- validFrom := time.Now().Add(-time.Hour) // valid an hour earlier to avoid flakes due to clock skew
- maxAge := time.Hour * 24 * 365 // one year self-signed certs
-
- baseName := fmt.Sprintf("%s_%s_%s", host, strings.Join(ipsToStrings(alternateIPs), "-"), strings.Join(alternateDNS, "-"))
- certFixturePath := path.Join(fixtureDirectory, baseName+".crt")
- keyFixturePath := path.Join(fixtureDirectory, baseName+".key")
- if len(fixtureDirectory) > 0 {
- cert, err := ioutil.ReadFile(certFixturePath)
- if err == nil {
- key, err := ioutil.ReadFile(keyFixturePath)
- if err == nil {
- return cert, key, nil
- }
- return nil, nil, fmt.Errorf("cert %s can be read, but key %s cannot: %v", certFixturePath, keyFixturePath, err)
- }
- maxAge = 100 * time.Hour * 24 * 365 // 100 years fixtures
- }
-
- caKey, err := rsa.GenerateKey(cryptorand.Reader, 2048)
- if err != nil {
- return nil, nil, err
- }
-
- caTemplate := x509.Certificate{
- SerialNumber: big.NewInt(1),
- Subject: pkix.Name{
- CommonName: fmt.Sprintf("%s-ca@%d", host, time.Now().Unix()),
- },
- NotBefore: validFrom,
- NotAfter: validFrom.Add(maxAge),
-
- KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
- BasicConstraintsValid: true,
- IsCA: true,
- }
-
- caDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &caTemplate, &caTemplate, &caKey.PublicKey, caKey)
- if err != nil {
- return nil, nil, err
- }
-
- caCertificate, err := x509.ParseCertificate(caDERBytes)
- if err != nil {
- return nil, nil, err
- }
-
- priv, err := rsa.GenerateKey(cryptorand.Reader, 2048)
- if err != nil {
- return nil, nil, err
- }
-
- template := x509.Certificate{
- SerialNumber: big.NewInt(2),
- Subject: pkix.Name{
- CommonName: fmt.Sprintf("%s@%d", host, time.Now().Unix()),
- },
- NotBefore: validFrom,
- NotAfter: validFrom.Add(maxAge),
-
- KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
- ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
- BasicConstraintsValid: true,
- }
-
- if ip := net.ParseIP(host); ip != nil {
- template.IPAddresses = append(template.IPAddresses, ip)
- } else {
- template.DNSNames = append(template.DNSNames, host)
- }
-
- template.IPAddresses = append(template.IPAddresses, alternateIPs...)
- template.DNSNames = append(template.DNSNames, alternateDNS...)
-
- derBytes, err := x509.CreateCertificate(cryptorand.Reader, &template, caCertificate, &priv.PublicKey, caKey)
- if err != nil {
- return nil, nil, err
- }
-
- // Generate cert, followed by ca
- certBuffer := bytes.Buffer{}
- if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: derBytes}); err != nil {
- return nil, nil, err
- }
- if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: caDERBytes}); err != nil {
- return nil, nil, err
- }
-
- // Generate key
- keyBuffer := bytes.Buffer{}
- if err := pem.Encode(&keyBuffer, &pem.Block{Type: keyutil.RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
- return nil, nil, err
- }
-
- if len(fixtureDirectory) > 0 {
- if err := ioutil.WriteFile(certFixturePath, certBuffer.Bytes(), 0644); err != nil {
- return nil, nil, fmt.Errorf("failed to write cert fixture to %s: %v", certFixturePath, err)
- }
- if err := ioutil.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0644); err != nil {
- return nil, nil, fmt.Errorf("failed to write key fixture to %s: %v", certFixturePath, err)
- }
- }
-
- return certBuffer.Bytes(), keyBuffer.Bytes(), nil
-}
-
-func ipsToStrings(ips []net.IP) []string {
- ss := make([]string, 0, len(ips))
- for _, ip := range ips {
- ss = append(ss, ip.String())
- }
- return ss
-}
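GenerateSelfSignedCertKey returns PEM-encoded certificate and key bytes suitable for a local test server. A brief sketch that generates a pair and persists it with WriteCert from the io.go file removed further down; the hostname, SAN, and output paths are illustrative.

package main

import (
	"fmt"
	"io/ioutil"
	"net"

	"k8s.io/client-go/util/cert"
)

func main() {
	// Generate a self-signed serving cert for localhost plus an extra SAN.
	certPEM, keyPEM, err := cert.GenerateSelfSignedCertKey(
		"localhost",
		[]net.IP{net.ParseIP("127.0.0.1")},
		[]string{"demo.local"}, // illustrative alternate DNS name
	)
	if err != nil {
		panic(err)
	}

	// WriteCert creates parent directories as needed and writes with mode 0644.
	if err := cert.WriteCert("/tmp/demo-certs/tls.crt", certPEM); err != nil {
		panic(err)
	}
	// Keep the private key more restrictive than the certificate.
	if err := ioutil.WriteFile("/tmp/demo-certs/tls.key", keyPEM, 0600); err != nil {
		panic(err)
	}
	fmt.Println("wrote self-signed cert and key to /tmp/demo-certs")
}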
diff --git a/vendor/k8s.io/client-go/util/cert/csr.go b/vendor/k8s.io/client-go/util/cert/csr.go
deleted file mode 100644
index 39a6751f7..000000000
--- a/vendor/k8s.io/client-go/util/cert/csr.go
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cert
-
-import (
- cryptorand "crypto/rand"
- "crypto/rsa"
- "crypto/x509"
- "crypto/x509/pkix"
- "encoding/pem"
- "net"
-)
-
-// MakeCSR generates a PEM-encoded CSR using the supplied private key, subject, and SANs.
-// All key types that are implemented via crypto.Signer are supported (This includes *rsa.PrivateKey and *ecdsa.PrivateKey.)
-func MakeCSR(privateKey interface{}, subject *pkix.Name, dnsSANs []string, ipSANs []net.IP) (csr []byte, err error) {
- template := &x509.CertificateRequest{
- Subject: *subject,
- DNSNames: dnsSANs,
- IPAddresses: ipSANs,
- }
-
- return MakeCSRFromTemplate(privateKey, template)
-}
-
-// MakeCSRFromTemplate generates a PEM-encoded CSR using the supplied private
-// key and certificate request as a template. All key types that are
-// implemented via crypto.Signer are supported (This includes *rsa.PrivateKey
-// and *ecdsa.PrivateKey.)
-func MakeCSRFromTemplate(privateKey interface{}, template *x509.CertificateRequest) ([]byte, error) {
- t := *template
- t.SignatureAlgorithm = sigType(privateKey)
-
- csrDER, err := x509.CreateCertificateRequest(cryptorand.Reader, &t, privateKey)
- if err != nil {
- return nil, err
- }
-
- csrPemBlock := &pem.Block{
- Type: CertificateRequestBlockType,
- Bytes: csrDER,
- }
-
- return pem.EncodeToMemory(csrPemBlock), nil
-}
-
-func sigType(privateKey interface{}) x509.SignatureAlgorithm {
- // Customize the signature for RSA keys, depending on the key size
- if privateKey, ok := privateKey.(*rsa.PrivateKey); ok {
- keySize := privateKey.N.BitLen()
- switch {
- case keySize >= 4096:
- return x509.SHA512WithRSA
- case keySize >= 3072:
- return x509.SHA384WithRSA
- default:
- return x509.SHA256WithRSA
- }
- }
- return x509.UnknownSignatureAlgorithm
-}
diff --git a/vendor/k8s.io/client-go/util/cert/io.go b/vendor/k8s.io/client-go/util/cert/io.go
deleted file mode 100644
index 5efb24894..000000000
--- a/vendor/k8s.io/client-go/util/cert/io.go
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cert
-
-import (
- "crypto/x509"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
-)
-
-// CanReadCertAndKey returns true if the certificate and key files already exist,
-// otherwise it returns false. If only one of the two can be read, an error is returned.
-func CanReadCertAndKey(certPath, keyPath string) (bool, error) {
- certReadable := canReadFile(certPath)
- keyReadable := canReadFile(keyPath)
-
- if certReadable == false && keyReadable == false {
- return false, nil
- }
-
- if certReadable == false {
- return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", certPath)
- }
-
- if keyReadable == false {
- return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", keyPath)
- }
-
- return true, nil
-}
-
-// If the file represented by path exists and
-// readable, returns true otherwise returns false.
-func canReadFile(path string) bool {
- f, err := os.Open(path)
- if err != nil {
- return false
- }
-
- defer f.Close()
-
- return true
-}
-
-// WriteCert writes the pem-encoded certificate data to certPath.
-// The certificate file will be created with file mode 0644.
-// If the certificate file already exists, it will be overwritten.
-// The parent directory of the certPath will be created as needed with file mode 0755.
-func WriteCert(certPath string, data []byte) error {
- if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil {
- return err
- }
- return ioutil.WriteFile(certPath, data, os.FileMode(0644))
-}
-
-// NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file.
-// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
-func NewPool(filename string) (*x509.CertPool, error) {
- certs, err := CertsFromFile(filename)
- if err != nil {
- return nil, err
- }
- pool := x509.NewCertPool()
- for _, cert := range certs {
- pool.AddCert(cert)
- }
- return pool, nil
-}
-
-// CertsFromFile returns the x509.Certificates contained in the given PEM-encoded file.
-// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
-func CertsFromFile(file string) ([]*x509.Certificate, error) {
- pemBlock, err := ioutil.ReadFile(file)
- if err != nil {
- return nil, err
- }
- certs, err := ParseCertsPEM(pemBlock)
- if err != nil {
- return nil, fmt.Errorf("error reading %s: %s", file, err)
- }
- return certs, nil
-}
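
A sketch of loading a CA bundle with NewPool and handing it to a TLS config; the bundle path is illustrative:

package main

import (
	"crypto/tls"
	"log"

	"k8s.io/client-go/util/cert"
)

func main() {
	bundlePath := "/etc/pki/tls/certs/ca-bundle.crt" // placeholder location
	pool, err := cert.NewPool(bundlePath)
	if err != nil {
		log.Fatalf("loading CA bundle: %v", err)
	}
	tlsConfig := &tls.Config{RootCAs: pool}
	log.Printf("CA bundle loaded into %T", tlsConfig.RootCAs)
}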
diff --git a/vendor/k8s.io/client-go/util/cert/pem.go b/vendor/k8s.io/client-go/util/cert/pem.go
deleted file mode 100644
index 9185e2e22..000000000
--- a/vendor/k8s.io/client-go/util/cert/pem.go
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cert
-
-import (
- "crypto/x509"
- "encoding/pem"
- "errors"
-)
-
-const (
- // CertificateBlockType is a possible value for pem.Block.Type.
- CertificateBlockType = "CERTIFICATE"
- // CertificateRequestBlockType is a possible value for pem.Block.Type.
- CertificateRequestBlockType = "CERTIFICATE REQUEST"
-)
-
-// ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array
-// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates
-func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) {
- ok := false
- certs := []*x509.Certificate{}
- for len(pemCerts) > 0 {
- var block *pem.Block
- block, pemCerts = pem.Decode(pemCerts)
- if block == nil {
- break
- }
- // Only use PEM "CERTIFICATE" blocks without extra headers
- if block.Type != CertificateBlockType || len(block.Headers) != 0 {
- continue
- }
-
- cert, err := x509.ParseCertificate(block.Bytes)
- if err != nil {
- return certs, err
- }
-
- certs = append(certs, cert)
- ok = true
- }
-
- if !ok {
- return certs, errors.New("data does not contain any valid RSA or ECDSA certificates")
- }
- return certs, nil
-}
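
ParseCertsPEM operates on raw PEM bytes; a minimal sketch, reading the input from an illustrative path:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"k8s.io/client-go/util/cert"
)

func main() {
	pemBytes, err := ioutil.ReadFile("/etc/containers/certs.d/ca.crt") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	certs, err := cert.ParseCertsPEM(pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range certs {
		fmt.Println(c.Subject.CommonName, "expires", c.NotAfter)
	}
}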
diff --git a/vendor/k8s.io/client-go/util/connrotation/connrotation.go b/vendor/k8s.io/client-go/util/connrotation/connrotation.go
deleted file mode 100644
index 235a9e019..000000000
--- a/vendor/k8s.io/client-go/util/connrotation/connrotation.go
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package connrotation implements a connection dialer that tracks and can close
-// all created connections.
-//
-// This is used for credential rotation of long-lived connections, when there's
-// no way to re-authenticate on a live connection.
-package connrotation
-
-import (
- "context"
- "net"
- "sync"
-)
-
-// DialFunc is a shorthand for signature of net.DialContext.
-type DialFunc func(ctx context.Context, network, address string) (net.Conn, error)
-
-// Dialer opens connections through Dial and tracks them.
-type Dialer struct {
- dial DialFunc
-
- mu sync.Mutex
- conns map[*closableConn]struct{}
-}
-
-// NewDialer creates a new Dialer instance.
-//
-// If dial is not nil, it will be used to create new underlying connections.
-// Otherwise net.DialContext is used.
-func NewDialer(dial DialFunc) *Dialer {
- return &Dialer{
- dial: dial,
- conns: make(map[*closableConn]struct{}),
- }
-}
-
-// CloseAll forcibly closes all tracked connections.
-//
-// Note: new connections may get created before CloseAll returns.
-func (d *Dialer) CloseAll() {
- d.mu.Lock()
- conns := d.conns
- d.conns = make(map[*closableConn]struct{})
- d.mu.Unlock()
-
- for conn := range conns {
- conn.Close()
- }
-}
-
-// Dial creates a new tracked connection.
-func (d *Dialer) Dial(network, address string) (net.Conn, error) {
- return d.DialContext(context.Background(), network, address)
-}
-
-// DialContext creates a new tracked connection.
-func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
- conn, err := d.dial(ctx, network, address)
- if err != nil {
- return nil, err
- }
-
- closable := &closableConn{Conn: conn}
-
- // Start tracking the connection
- d.mu.Lock()
- d.conns[closable] = struct{}{}
- d.mu.Unlock()
-
- // When the connection is closed, remove it from the map. This will
- // be no-op if the connection isn't in the map, e.g. if CloseAll()
- // is called.
- closable.onClose = func() {
- d.mu.Lock()
- delete(d.conns, closable)
- d.mu.Unlock()
- }
-
- return closable, nil
-}
-
-type closableConn struct {
- onClose func()
- net.Conn
-}
-
-func (c *closableConn) Close() error {
- go c.onClose()
- return c.Conn.Close()
-}
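
The package comment describes the credential-rotation use case; a sketch of wiring the Dialer into an http.Transport and dropping connections after a rotation:

package main

import (
	"net"
	"net/http"
	"time"

	"k8s.io/client-go/util/connrotation"
)

func main() {
	// Wrap the standard dialer so every connection is tracked.
	base := (&net.Dialer{Timeout: 30 * time.Second}).DialContext
	dialer := connrotation.NewDialer(base)

	client := &http.Client{
		Transport: &http.Transport{DialContext: dialer.DialContext},
	}
	_ = client // issue requests as usual

	// After rotating client credentials, close every tracked connection so the
	// next request re-dials and re-authenticates with the new credentials.
	dialer.CloseAll()
}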
diff --git a/vendor/k8s.io/client-go/util/exec/exec.go b/vendor/k8s.io/client-go/util/exec/exec.go
deleted file mode 100644
index d170badb6..000000000
--- a/vendor/k8s.io/client-go/util/exec/exec.go
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package exec
-
-// ExitError is an interface that presents an API similar to os.ProcessState, which is
-// what ExitError from os/exec is. This is designed to make testing a bit easier and
-// probably loses some of the cross-platform properties of the underlying library.
-type ExitError interface {
- String() string
- Error() string
- Exited() bool
- ExitStatus() int
-}
-
-// CodeExitError is an implementation of ExitError consisting of an error object
-// and an exit code (the upper bits of os.exec.ExitStatus).
-type CodeExitError struct {
- Err error
- Code int
-}
-
-var _ ExitError = CodeExitError{}
-
-func (e CodeExitError) Error() string {
- return e.Err.Error()
-}
-
-func (e CodeExitError) String() string {
- return e.Err.Error()
-}
-
-func (e CodeExitError) Exited() bool {
- return true
-}
-
-func (e CodeExitError) ExitStatus() int {
- return e.Code
-}
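
A sketch of returning a CodeExitError and inspecting it through the ExitError interface; the exit code is arbitrary:

package main

import (
	"errors"
	"fmt"

	utilexec "k8s.io/client-go/util/exec"
)

func run() error {
	// Pretend an external command failed with status 2.
	return utilexec.CodeExitError{Err: errors.New("command failed"), Code: 2}
}

func main() {
	var exitErr utilexec.ExitError
	if err := run(); errors.As(err, &exitErr) {
		fmt.Println("exit status:", exitErr.ExitStatus())
	}
}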
diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go
deleted file mode 100644
index 39cd72f95..000000000
--- a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package flowcontrol
-
-import (
- "sync"
- "time"
-
- "k8s.io/apimachinery/pkg/util/clock"
- "k8s.io/utils/integer"
-)
-
-type backoffEntry struct {
- backoff time.Duration
- lastUpdate time.Time
-}
-
-type Backoff struct {
- sync.Mutex
- Clock clock.Clock
- defaultDuration time.Duration
- maxDuration time.Duration
- perItemBackoff map[string]*backoffEntry
-}
-
-func NewFakeBackOff(initial, max time.Duration, tc *clock.FakeClock) *Backoff {
- return &Backoff{
- perItemBackoff: map[string]*backoffEntry{},
- Clock: tc,
- defaultDuration: initial,
- maxDuration: max,
- }
-}
-
-func NewBackOff(initial, max time.Duration) *Backoff {
- return &Backoff{
- perItemBackoff: map[string]*backoffEntry{},
- Clock: clock.RealClock{},
- defaultDuration: initial,
- maxDuration: max,
- }
-}
-
-// Get the current backoff Duration
-func (p *Backoff) Get(id string) time.Duration {
- p.Lock()
- defer p.Unlock()
- var delay time.Duration
- entry, ok := p.perItemBackoff[id]
- if ok {
- delay = entry.backoff
- }
- return delay
-}
-
-// move backoff to the next mark, capping at maxDuration
-func (p *Backoff) Next(id string, eventTime time.Time) {
- p.Lock()
- defer p.Unlock()
- entry, ok := p.perItemBackoff[id]
- if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
- entry = p.initEntryUnsafe(id)
- } else {
- delay := entry.backoff * 2 // exponential
- entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration)))
- }
- entry.lastUpdate = p.Clock.Now()
-}
-
-// Reset forces clearing of all backoff data for a given key.
-func (p *Backoff) Reset(id string) {
- p.Lock()
- defer p.Unlock()
- delete(p.perItemBackoff, id)
-}
-
-// IsInBackOffSince returns true if the elapsed time since eventTime is smaller than the current backoff window.
-func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
- p.Lock()
- defer p.Unlock()
- entry, ok := p.perItemBackoff[id]
- if !ok {
- return false
- }
- if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
- return false
- }
- return p.Clock.Since(eventTime) < entry.backoff
-}
-
-// IsInBackOffSinceUpdate returns true if the time since the entry's lastUpdate is less than the current backoff window.
-func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool {
- p.Lock()
- defer p.Unlock()
- entry, ok := p.perItemBackoff[id]
- if !ok {
- return false
- }
- if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
- return false
- }
- return eventTime.Sub(entry.lastUpdate) < entry.backoff
-}
-
-// Garbage collect records that have aged past maxDuration. Backoff users are expected
-// to invoke this periodically.
-func (p *Backoff) GC() {
- p.Lock()
- defer p.Unlock()
- now := p.Clock.Now()
- for id, entry := range p.perItemBackoff {
- if now.Sub(entry.lastUpdate) > p.maxDuration*2 {
- // GC when entry has not been updated for 2*maxDuration
- delete(p.perItemBackoff, id)
- }
- }
-}
-
-func (p *Backoff) DeleteEntry(id string) {
- p.Lock()
- defer p.Unlock()
- delete(p.perItemBackoff, id)
-}
-
-// Take a lock on *Backoff, before calling initEntryUnsafe
-func (p *Backoff) initEntryUnsafe(id string) *backoffEntry {
- entry := &backoffEntry{backoff: p.defaultDuration}
- p.perItemBackoff[id] = entry
- return entry
-}
-
-// After 2*maxDuration we restart the backoff factor to the beginning
-func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool {
- return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration
-}
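
A sketch of per-item exponential backoff with this type; the item ID and retry-loop shape are illustrative:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// Start at 1s per item, cap at 30s; entries reset after 2*maxDuration of inactivity.
	backoff := flowcontrol.NewBackOff(1*time.Second, 30*time.Second)
	const id = "pull-image-foo" // placeholder key

	for attempt := 0; attempt < 3; attempt++ {
		now := time.Now()
		if backoff.IsInBackOffSince(id, now) {
			time.Sleep(backoff.Get(id))
		}
		// ... attempt the operation here; on failure, widen the window:
		backoff.Next(id, now)
	}
	fmt.Println("current delay:", backoff.Get(id))
}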
diff --git a/vendor/k8s.io/client-go/util/flowcontrol/throttle.go b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go
deleted file mode 100644
index e671c044d..000000000
--- a/vendor/k8s.io/client-go/util/flowcontrol/throttle.go
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package flowcontrol
-
-import (
- "sync"
- "time"
-
- "golang.org/x/time/rate"
-)
-
-type RateLimiter interface {
- // TryAccept returns true if a token is taken immediately. Otherwise,
- // it returns false.
- TryAccept() bool
- // Accept returns once a token becomes available.
- Accept()
-	// Stop stops the rate limiter, subsequent calls to TryAccept will return false
- Stop()
- // QPS returns QPS of this rate limiter
- QPS() float32
-}
-
-type tokenBucketRateLimiter struct {
- limiter *rate.Limiter
- clock Clock
- qps float32
-}
-
-// NewTokenBucketRateLimiter creates a rate limiter which implements a token bucket approach.
-// The rate limiter allows bursts of up to 'burst' to exceed the QPS, while still maintaining a
-// smoothed qps rate of 'qps'.
-// The bucket is initially filled with 'burst' tokens, and refills at a rate of 'qps'.
-// The maximum number of tokens in the bucket is capped at 'burst'.
-func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter {
- limiter := rate.NewLimiter(rate.Limit(qps), burst)
- return newTokenBucketRateLimiter(limiter, realClock{}, qps)
-}
-
-// An injectable, mockable clock interface.
-type Clock interface {
- Now() time.Time
- Sleep(time.Duration)
-}
-
-type realClock struct{}
-
-func (realClock) Now() time.Time {
- return time.Now()
-}
-func (realClock) Sleep(d time.Duration) {
- time.Sleep(d)
-}
-
-// NewTokenBucketRateLimiterWithClock is identical to NewTokenBucketRateLimiter
-// but allows an injectable clock, for testing.
-func NewTokenBucketRateLimiterWithClock(qps float32, burst int, c Clock) RateLimiter {
- limiter := rate.NewLimiter(rate.Limit(qps), burst)
- return newTokenBucketRateLimiter(limiter, c, qps)
-}
-
-func newTokenBucketRateLimiter(limiter *rate.Limiter, c Clock, qps float32) RateLimiter {
- return &tokenBucketRateLimiter{
- limiter: limiter,
- clock: c,
- qps: qps,
- }
-}
-
-func (t *tokenBucketRateLimiter) TryAccept() bool {
- return t.limiter.AllowN(t.clock.Now(), 1)
-}
-
-// Accept will block until a token becomes available
-func (t *tokenBucketRateLimiter) Accept() {
- now := t.clock.Now()
- t.clock.Sleep(t.limiter.ReserveN(now, 1).DelayFrom(now))
-}
-
-func (t *tokenBucketRateLimiter) Stop() {
-}
-
-func (t *tokenBucketRateLimiter) QPS() float32 {
- return t.qps
-}
-
-type fakeAlwaysRateLimiter struct{}
-
-func NewFakeAlwaysRateLimiter() RateLimiter {
- return &fakeAlwaysRateLimiter{}
-}
-
-func (t *fakeAlwaysRateLimiter) TryAccept() bool {
- return true
-}
-
-func (t *fakeAlwaysRateLimiter) Stop() {}
-
-func (t *fakeAlwaysRateLimiter) Accept() {}
-
-func (t *fakeAlwaysRateLimiter) QPS() float32 {
- return 1
-}
-
-type fakeNeverRateLimiter struct {
- wg sync.WaitGroup
-}
-
-func NewFakeNeverRateLimiter() RateLimiter {
- rl := fakeNeverRateLimiter{}
- rl.wg.Add(1)
- return &rl
-}
-
-func (t *fakeNeverRateLimiter) TryAccept() bool {
- return false
-}
-
-func (t *fakeNeverRateLimiter) Stop() {
- t.wg.Done()
-}
-
-func (t *fakeNeverRateLimiter) Accept() {
- t.wg.Wait()
-}
-
-func (t *fakeNeverRateLimiter) QPS() float32 {
- return 1
-}
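
A sketch of smoothing calls with the token-bucket limiter (5 QPS sustained, bursts of up to 10; the numbers are arbitrary):

package main

import (
	"fmt"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	limiter := flowcontrol.NewTokenBucketRateLimiter(5, 10)

	for i := 0; i < 20; i++ {
		if limiter.TryAccept() {
			fmt.Println("request", i, "sent immediately")
			continue
		}
		limiter.Accept() // block until a token is available
		fmt.Println("request", i, "sent after waiting")
	}
}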
diff --git a/vendor/k8s.io/client-go/util/keyutil/OWNERS b/vendor/k8s.io/client-go/util/keyutil/OWNERS
deleted file mode 100644
index 470b7a1c9..000000000
--- a/vendor/k8s.io/client-go/util/keyutil/OWNERS
+++ /dev/null
@@ -1,7 +0,0 @@
-approvers:
-- sig-auth-certificates-approvers
-reviewers:
-- sig-auth-certificates-reviewers
-labels:
-- sig/auth
-
diff --git a/vendor/k8s.io/client-go/util/keyutil/key.go b/vendor/k8s.io/client-go/util/keyutil/key.go
deleted file mode 100644
index 83c2c6254..000000000
--- a/vendor/k8s.io/client-go/util/keyutil/key.go
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package keyutil contains utilities for managing public/private key pairs.
-package keyutil
-
-import (
- "crypto"
- "crypto/ecdsa"
- "crypto/elliptic"
- cryptorand "crypto/rand"
- "crypto/rsa"
- "crypto/x509"
- "encoding/pem"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
-)
-
-const (
- // ECPrivateKeyBlockType is a possible value for pem.Block.Type.
- ECPrivateKeyBlockType = "EC PRIVATE KEY"
- // RSAPrivateKeyBlockType is a possible value for pem.Block.Type.
- RSAPrivateKeyBlockType = "RSA PRIVATE KEY"
- // PrivateKeyBlockType is a possible value for pem.Block.Type.
- PrivateKeyBlockType = "PRIVATE KEY"
- // PublicKeyBlockType is a possible value for pem.Block.Type.
- PublicKeyBlockType = "PUBLIC KEY"
-)
-
-// MakeEllipticPrivateKeyPEM creates an ECDSA private key
-func MakeEllipticPrivateKeyPEM() ([]byte, error) {
- privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
- if err != nil {
- return nil, err
- }
-
- derBytes, err := x509.MarshalECPrivateKey(privateKey)
- if err != nil {
- return nil, err
- }
-
- privateKeyPemBlock := &pem.Block{
- Type: ECPrivateKeyBlockType,
- Bytes: derBytes,
- }
- return pem.EncodeToMemory(privateKeyPemBlock), nil
-}
-
-// WriteKey writes the pem-encoded key data to keyPath.
-// The key file will be created with file mode 0600.
-// If the key file already exists, it will be overwritten.
-// The parent directory of the keyPath will be created as needed with file mode 0755.
-func WriteKey(keyPath string, data []byte) error {
- if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil {
- return err
- }
- return ioutil.WriteFile(keyPath, data, os.FileMode(0600))
-}
-
-// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it
-// can't find one, it will generate a new key and store it there.
-func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) {
- loadedData, err := ioutil.ReadFile(keyPath)
- // Call verifyKeyData to ensure the file wasn't empty/corrupt.
- if err == nil && verifyKeyData(loadedData) {
- return loadedData, false, err
- }
- if !os.IsNotExist(err) {
- return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err)
- }
-
- generatedData, err := MakeEllipticPrivateKeyPEM()
- if err != nil {
- return nil, false, fmt.Errorf("error generating key: %v", err)
- }
- if err := WriteKey(keyPath, generatedData); err != nil {
- return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err)
- }
- return generatedData, true, nil
-}
-
-// MarshalPrivateKeyToPEM converts a known private key type of RSA or ECDSA to
-// a PEM encoded block or returns an error.
-func MarshalPrivateKeyToPEM(privateKey crypto.PrivateKey) ([]byte, error) {
- switch t := privateKey.(type) {
- case *ecdsa.PrivateKey:
- derBytes, err := x509.MarshalECPrivateKey(t)
- if err != nil {
- return nil, err
- }
- block := &pem.Block{
- Type: ECPrivateKeyBlockType,
- Bytes: derBytes,
- }
- return pem.EncodeToMemory(block), nil
- case *rsa.PrivateKey:
- block := &pem.Block{
- Type: RSAPrivateKeyBlockType,
- Bytes: x509.MarshalPKCS1PrivateKey(t),
- }
- return pem.EncodeToMemory(block), nil
- default:
- return nil, fmt.Errorf("private key is not a recognized type: %T", privateKey)
- }
-}
-
-// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file.
-// Returns an error if the file could not be read or if the private key could not be parsed.
-func PrivateKeyFromFile(file string) (interface{}, error) {
- data, err := ioutil.ReadFile(file)
- if err != nil {
- return nil, err
- }
- key, err := ParsePrivateKeyPEM(data)
- if err != nil {
- return nil, fmt.Errorf("error reading private key file %s: %v", file, err)
- }
- return key, nil
-}
-
-// PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file.
-// Reads public keys from both public and private key files.
-func PublicKeysFromFile(file string) ([]interface{}, error) {
- data, err := ioutil.ReadFile(file)
- if err != nil {
- return nil, err
- }
- keys, err := ParsePublicKeysPEM(data)
- if err != nil {
- return nil, fmt.Errorf("error reading public key file %s: %v", file, err)
- }
- return keys, nil
-}
-
-// verifyKeyData returns true if the provided data appears to be a valid private key.
-func verifyKeyData(data []byte) bool {
- if len(data) == 0 {
- return false
- }
- _, err := ParsePrivateKeyPEM(data)
- return err == nil
-}
-
-// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data.
-// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY"
-func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) {
- var privateKeyPemBlock *pem.Block
- for {
- privateKeyPemBlock, keyData = pem.Decode(keyData)
- if privateKeyPemBlock == nil {
- break
- }
-
- switch privateKeyPemBlock.Type {
- case ECPrivateKeyBlockType:
- // ECDSA Private Key in ASN.1 format
- if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil {
- return key, nil
- }
- case RSAPrivateKeyBlockType:
- // RSA Private Key in PKCS#1 format
- if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil {
- return key, nil
- }
- case PrivateKeyBlockType:
- // RSA or ECDSA Private Key in unencrypted PKCS#8 format
- if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil {
- return key, nil
- }
- }
-
- // tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks
- // originally, only the first PEM block was parsed and expected to be a key block
- }
-
- // we read all the PEM blocks and didn't recognize one
- return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key")
-}
-
-// ParsePublicKeysPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array.
-// Reads public keys from both public and private key files.
-func ParsePublicKeysPEM(keyData []byte) ([]interface{}, error) {
- var block *pem.Block
- keys := []interface{}{}
- for {
- // read the next block
- block, keyData = pem.Decode(keyData)
- if block == nil {
- break
- }
-
- // test block against parsing functions
- if privateKey, err := parseRSAPrivateKey(block.Bytes); err == nil {
- keys = append(keys, &privateKey.PublicKey)
- continue
- }
- if publicKey, err := parseRSAPublicKey(block.Bytes); err == nil {
- keys = append(keys, publicKey)
- continue
- }
- if privateKey, err := parseECPrivateKey(block.Bytes); err == nil {
- keys = append(keys, &privateKey.PublicKey)
- continue
- }
- if publicKey, err := parseECPublicKey(block.Bytes); err == nil {
- keys = append(keys, publicKey)
- continue
- }
-
- // tolerate non-key PEM blocks for backwards compatibility
- // originally, only the first PEM block was parsed and expected to be a key block
- }
-
- if len(keys) == 0 {
- return nil, fmt.Errorf("data does not contain any valid RSA or ECDSA public keys")
- }
- return keys, nil
-}
-
-// parseRSAPublicKey parses a single RSA public key from the provided data
-func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) {
- var err error
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil {
- if cert, err := x509.ParseCertificate(data); err == nil {
- parsedKey = cert.PublicKey
- } else {
- return nil, err
- }
- }
-
- // Test if parsed key is an RSA Public Key
- var pubKey *rsa.PublicKey
- var ok bool
- if pubKey, ok = parsedKey.(*rsa.PublicKey); !ok {
- return nil, fmt.Errorf("data doesn't contain valid RSA Public Key")
- }
-
- return pubKey, nil
-}
-
-// parseRSAPrivateKey parses a single RSA private key from the provided data
-func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) {
- var err error
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKCS1PrivateKey(data); err != nil {
- if parsedKey, err = x509.ParsePKCS8PrivateKey(data); err != nil {
- return nil, err
- }
- }
-
- // Test if parsed key is an RSA Private Key
- var privKey *rsa.PrivateKey
- var ok bool
- if privKey, ok = parsedKey.(*rsa.PrivateKey); !ok {
- return nil, fmt.Errorf("data doesn't contain valid RSA Private Key")
- }
-
- return privKey, nil
-}
-
-// parseECPublicKey parses a single ECDSA public key from the provided data
-func parseECPublicKey(data []byte) (*ecdsa.PublicKey, error) {
- var err error
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil {
- if cert, err := x509.ParseCertificate(data); err == nil {
- parsedKey = cert.PublicKey
- } else {
- return nil, err
- }
- }
-
- // Test if parsed key is an ECDSA Public Key
- var pubKey *ecdsa.PublicKey
- var ok bool
- if pubKey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
- return nil, fmt.Errorf("data doesn't contain valid ECDSA Public Key")
- }
-
- return pubKey, nil
-}
-
-// parseECPrivateKey parses a single ECDSA private key from the provided data
-func parseECPrivateKey(data []byte) (*ecdsa.PrivateKey, error) {
- var err error
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParseECPrivateKey(data); err != nil {
- return nil, err
- }
-
- // Test if parsed key is an ECDSA Private Key
- var privKey *ecdsa.PrivateKey
- var ok bool
- if privKey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
- return nil, fmt.Errorf("data doesn't contain valid ECDSA Private Key")
- }
-
- return privKey, nil
-}
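
A sketch combining LoadOrGenerateKeyFile and ParsePrivateKeyPEM; the key path is a placeholder:

package main

import (
	"fmt"
	"log"

	"k8s.io/client-go/util/keyutil"
)

func main() {
	keyPath := "/var/lib/example/serving.key" // placeholder location
	pemData, generated, err := keyutil.LoadOrGenerateKeyFile(keyPath)
	if err != nil {
		log.Fatal(err)
	}
	if generated {
		fmt.Println("generated a new ECDSA key at", keyPath)
	}

	key, err := keyutil.ParsePrivateKeyPEM(pemData)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("loaded private key of type %T\n", key)
}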
diff --git a/vendor/k8s.io/klog/.travis.yml b/vendor/k8s.io/klog/.travis.yml
deleted file mode 100644
index 5677664c2..000000000
--- a/vendor/k8s.io/klog/.travis.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-language: go
-go_import_path: k8s.io/klog
-dist: xenial
-go:
- - 1.9.x
- - 1.10.x
- - 1.11.x
- - 1.12.x
-script:
- - go get -t -v ./...
- - diff -u <(echo -n) <(gofmt -d .)
- - diff -u <(echo -n) <(golint $(go list -e ./...))
- - go tool vet . || go vet .
- - go test -v -race ./...
-install:
- - go get golang.org/x/lint/golint
diff --git a/vendor/k8s.io/klog/CONTRIBUTING.md b/vendor/k8s.io/klog/CONTRIBUTING.md
deleted file mode 100644
index 574a56abb..000000000
--- a/vendor/k8s.io/klog/CONTRIBUTING.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Contributing Guidelines
-
-Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
-
-_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
-
-## Getting Started
-
-We have full documentation on how to get started contributing here:
-
-- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
-- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
-- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers
-
-## Mentorship
-
-- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
-
-## Contact Information
-
-- [Slack](https://kubernetes.slack.com/messages/sig-architecture)
-- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture)
diff --git a/vendor/k8s.io/klog/LICENSE b/vendor/k8s.io/klog/LICENSE
deleted file mode 100644
index 37ec93a14..000000000
--- a/vendor/k8s.io/klog/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright
-owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities
-that control, are controlled by, or are under common control with that entity.
-For the purposes of this definition, "control" means (i) the power, direct or
-indirect, to cause the direction or management of such entity, whether by
-contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising
-permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including
-but not limited to software source code, documentation source, and configuration
-files.
-
-"Object" form shall mean any form resulting from mechanical transformation or
-translation of a Source form, including but not limited to compiled object code,
-generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made
-available under the License, as indicated by a copyright notice that is included
-in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that
-is based on (or derived from) the Work and for which the editorial revisions,
-annotations, elaborations, or other modifications represent, as a whole, an
-original work of authorship. For the purposes of this License, Derivative Works
-shall not include works that remain separable from, or merely link (or bind by
-name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version
-of the Work and any modifications or additions to that Work or Derivative Works
-thereof, that is intentionally submitted to Licensor for inclusion in the Work
-by the copyright owner or by an individual or Legal Entity authorized to submit
-on behalf of the copyright owner. For the purposes of this definition,
-"submitted" means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems, and
-issue tracking systems that are managed by, or on behalf of, the Licensor for
-the purpose of discussing and improving the Work, but excluding communication
-that is conspicuously marked or otherwise designated in writing by the copyright
-owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
-of whom a Contribution has been received by Licensor and subsequently
-incorporated within the Work.
-
-2. Grant of Copyright License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the Work and such
-Derivative Works in Source or Object form.
-
-3. Grant of Patent License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable (except as stated in this section) patent license to make, have
-made, use, offer to sell, sell, import, and otherwise transfer the Work, where
-such license applies only to those patent claims licensable by such Contributor
-that are necessarily infringed by their Contribution(s) alone or by combination
-of their Contribution(s) with the Work to which such Contribution(s) was
-submitted. If You institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work or a
-Contribution incorporated within the Work constitutes direct or contributory
-patent infringement, then any patent licenses granted to You under this License
-for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution.
-
-You may reproduce and distribute copies of the Work or Derivative Works thereof
-in any medium, with or without modifications, and in Source or Object form,
-provided that You meet the following conditions:
-
-You must give any other recipients of the Work or Derivative Works a copy of
-this License; and
-You must cause any modified files to carry prominent notices stating that You
-changed the files; and
-You must retain, in the Source form of any Derivative Works that You distribute,
-all copyright, patent, trademark, and attribution notices from the Source form
-of the Work, excluding those notices that do not pertain to any part of the
-Derivative Works; and
-If the Work includes a "NOTICE" text file as part of its distribution, then any
-Derivative Works that You distribute must include a readable copy of the
-attribution notices contained within such NOTICE file, excluding those notices
-that do not pertain to any part of the Derivative Works, in at least one of the
-following places: within a NOTICE text file distributed as part of the
-Derivative Works; within the Source form or documentation, if provided along
-with the Derivative Works; or, within a display generated by the Derivative
-Works, if and wherever such third-party notices normally appear. The contents of
-the NOTICE file are for informational purposes only and do not modify the
-License. You may add Your own attribution notices within Derivative Works that
-You distribute, alongside or as an addendum to the NOTICE text from the Work,
-provided that such additional attribution notices cannot be construed as
-modifying the License.
-You may add Your own copyright statement to Your modifications and may provide
-additional or different license terms and conditions for use, reproduction, or
-distribution of Your modifications, or for any such Derivative Works as a whole,
-provided Your use, reproduction, and distribution of the Work otherwise complies
-with the conditions stated in this License.
-
-5. Submission of Contributions.
-
-Unless You explicitly state otherwise, any Contribution intentionally submitted
-for inclusion in the Work by You to the Licensor shall be under the terms and
-conditions of this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify the terms of
-any separate license agreement you may have executed with Licensor regarding
-such Contributions.
-
-6. Trademarks.
-
-This License does not grant permission to use the trade names, trademarks,
-service marks, or product names of the Licensor, except as required for
-reasonable and customary use in describing the origin of the Work and
-reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty.
-
-Unless required by applicable law or agreed to in writing, Licensor provides the
-Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
-including, without limitation, any warranties or conditions of TITLE,
-NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
-solely responsible for determining the appropriateness of using or
-redistributing the Work and assume any risks associated with Your exercise of
-permissions under this License.
-
-8. Limitation of Liability.
-
-In no event and under no legal theory, whether in tort (including negligence),
-contract, or otherwise, unless required by applicable law (such as deliberate
-and grossly negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special, incidental,
-or consequential damages of any character arising as a result of this License or
-out of the use or inability to use the Work (including but not limited to
-damages for loss of goodwill, work stoppage, computer failure or malfunction, or
-any and all other commercial damages or losses), even if such Contributor has
-been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability.
-
-While redistributing the Work or Derivative Works thereof, You may choose to
-offer, and charge a fee for, acceptance of support, warranty, indemnity, or
-other liability obligations and/or rights consistent with this License. However,
-in accepting such obligations, You may act only on Your own behalf and on Your
-sole responsibility, not on behalf of any other Contributor, and only if You
-agree to indemnify, defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason of your
-accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work
-
-To apply the Apache License to your work, attach the following boilerplate
-notice, with the fields enclosed by brackets "[]" replaced with your own
-identifying information. (Don't include the brackets!) The text should be
-enclosed in the appropriate comment syntax for the file format. We also
-recommend that a file or class name and description of purpose be included on
-the same "printed page" as the copyright notice for easier identification within
-third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/k8s.io/klog/OWNERS b/vendor/k8s.io/klog/OWNERS
deleted file mode 100644
index 380e514f2..000000000
--- a/vendor/k8s.io/klog/OWNERS
+++ /dev/null
@@ -1,19 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-reviewers:
- - jayunit100
- - hoegaarden
- - andyxning
- - neolit123
- - pohly
- - yagonobre
- - vincepri
- - detiber
-approvers:
- - dims
- - thockin
- - justinsb
- - tallclair
- - piosz
- - brancz
- - DirectXMan12
- - lavalamp
diff --git a/vendor/k8s.io/klog/README.md b/vendor/k8s.io/klog/README.md
deleted file mode 100644
index 841468b4b..000000000
--- a/vendor/k8s.io/klog/README.md
+++ /dev/null
@@ -1,97 +0,0 @@
-klog
-====
-
-klog is a permanent fork of https://github.com/golang/glog.
-
-## Why was klog created?
-
-The decision to create klog was one that wasn't made lightly, but it was necessary due to some
-drawbacks that are present in [glog](https://github.com/golang/glog). Ultimately, the fork was created due to glog not being under active development; this can be seen in the glog README:
-
-> The code in this repo [...] is not itself under development
-
-This makes us unable to solve many use cases without a fork. The factors that contributed to needing feature development are listed below:
-
- * `glog` [presents a lot of "gotchas"](https://github.com/kubernetes/kubernetes/issues/61006) and introduces challenges in containerized environments, all of which aren't well documented.
- * `glog` doesn't provide an easy way to test logs, which detracts from the stability of software using it
- * A long term goal is to implement a logging interface that allows us to add context, change output format, etc.
-
-Historical context is available here:
-
- * https://github.com/kubernetes/kubernetes/issues/61006
- * https://github.com/kubernetes/kubernetes/issues/70264
- * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ
- * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ
-
-----
-
-How to use klog
-===============
-- Replace imports for `github.com/golang/glog` with `k8s.io/klog`
-- Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags
-- You can now use `log-file` instead of `log-dir` for logging to a single file (See `examples/log_file/usage_log_file.go`)
-- If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`)
-- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md))
-
-### Coexisting with glog
-This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and synchronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`.
-
-## Community, discussion, contribution, and support
-
-Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).
-
-You can reach the maintainers of this project at:
-
-- [Slack](https://kubernetes.slack.com/messages/sig-architecture)
-- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture)
-
-### Code of conduct
-
-Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).
-
-----
-
-glog
-====
-
-Leveled execution logs for Go.
-
-This is an efficient pure Go implementation of leveled logs in the
-manner of the open source C++ package
- https://github.com/google/glog
-
-By binding methods to booleans it is possible to use the log package
-without paying the expense of evaluating the arguments to the log.
-Through the -vmodule flag, the package also provides fine-grained
-control over logging at the file level.
-
-The comment from glog.go introduces the ideas:
-
- Package glog implements logging analogous to the Google-internal
- C++ INFO/ERROR/V setup. It provides functions Info, Warning,
- Error, Fatal, plus formatting variants such as Infof. It
- also provides V-style logging controlled by the -v and
- -vmodule=file=2 flags.
-
- Basic examples:
-
- glog.Info("Prepare to repel boarders")
-
- glog.Fatalf("Initialization failed: %s", err)
-
- See the documentation for the V function for an explanation
- of these examples:
-
- if glog.V(2) {
- glog.Info("Starting transaction...")
- }
-
- glog.V(2).Infoln("Processed", nItems, "elements")
-
-
-The repository contains an open source version of the log package
-used inside Google. The master copy of the source lives inside
-Google, not here. The code in this repo is for export only and is not itself
-under development. Feature requests will be ignored.
-
-Send bug reports to golang-nuts@googlegroups.com.
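
The usage notes above boil down to the following initialization pattern (a minimal sketch; the flag values are illustrative):

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil) // klog no longer registers its flags in init()
	_ = flag.Set("logtostderr", "true")
	flag.Parse()
	defer klog.Flush() // write any buffered log output before exiting

	klog.Info("Prepare to repel boarders")
	if klog.V(2) {
		klog.Info("Starting transaction...")
	}
}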
diff --git a/vendor/k8s.io/klog/RELEASE.md b/vendor/k8s.io/klog/RELEASE.md
deleted file mode 100644
index b53eb960c..000000000
--- a/vendor/k8s.io/klog/RELEASE.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# Release Process
-
-`klog` is released on an as-needed basis. The process is as follows:
-
-1. An issue is opened proposing a new release, with a changelog since the last release
-1. All [OWNERS](OWNERS) must LGTM this release
-1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
-1. The release issue is closed
-1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released`
diff --git a/vendor/k8s.io/klog/SECURITY_CONTACTS b/vendor/k8s.io/klog/SECURITY_CONTACTS
deleted file mode 100644
index 6128a5869..000000000
--- a/vendor/k8s.io/klog/SECURITY_CONTACTS
+++ /dev/null
@@ -1,20 +0,0 @@
-# Defined below are the security contacts for this repo.
-#
-# They are the contact point for the Product Security Committee to reach out
-# to for triaging and handling of incoming issues.
-#
-# The below names agree to abide by the
-# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy)
-# and will be removed and replaced if they violate that agreement.
-#
-# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
-# INSTRUCTIONS AT https://kubernetes.io/security/
-
-dims
-thockin
-justinsb
-tallclair
-piosz
-brancz
-DirectXMan12
-lavalamp
diff --git a/vendor/k8s.io/klog/code-of-conduct.md b/vendor/k8s.io/klog/code-of-conduct.md
deleted file mode 100644
index 0d15c00cf..000000000
--- a/vendor/k8s.io/klog/code-of-conduct.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Kubernetes Community Code of Conduct
-
-Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
diff --git a/vendor/k8s.io/klog/go.mod b/vendor/k8s.io/klog/go.mod
deleted file mode 100644
index 3877d8546..000000000
--- a/vendor/k8s.io/klog/go.mod
+++ /dev/null
@@ -1,5 +0,0 @@
-module k8s.io/klog
-
-go 1.12
-
-require github.com/go-logr/logr v0.1.0
diff --git a/vendor/k8s.io/klog/go.sum b/vendor/k8s.io/klog/go.sum
deleted file mode 100644
index fb64d277a..000000000
--- a/vendor/k8s.io/klog/go.sum
+++ /dev/null
@@ -1,2 +0,0 @@
-github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
diff --git a/vendor/k8s.io/klog/klog.go b/vendor/k8s.io/klog/klog.go
deleted file mode 100644
index 2712ce0af..000000000
--- a/vendor/k8s.io/klog/klog.go
+++ /dev/null
@@ -1,1308 +0,0 @@
-// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
-//
-// Copyright 2013 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
-// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
-// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
-//
-// Basic examples:
-//
-// klog.Info("Prepare to repel boarders")
-//
-// klog.Fatalf("Initialization failed: %s", err)
-//
-// See the documentation for the V function for an explanation of these examples:
-//
-// if klog.V(2) {
-// klog.Info("Starting transaction...")
-// }
-//
-// klog.V(2).Infoln("Processed", nItems, "elements")
-//
-// Log output is buffered and written periodically using Flush. Programs
-// should call Flush before exiting to guarantee all log output is written.
-//
-// By default, all log statements write to standard error.
-// This package provides several flags that modify this behavior.
-// As a result, flag.Parse must be called before any logging is done.
-//
-// -logtostderr=true
-// Logs are written to standard error instead of to files.
-// -alsologtostderr=false
-// Logs are written to standard error as well as to files.
-// -stderrthreshold=ERROR
-// Log events at or above this severity are logged to standard
-// error as well as to files.
-// -log_dir=""
-// Log files will be written to this directory instead of the
-// default temporary directory.
-//
-// Other flags provide aids to debugging.
-//
-// -log_backtrace_at=""
-// When set to a file and line number holding a logging statement,
-// such as
-// -log_backtrace_at=gopherflakes.go:234
-// a stack trace will be written to the Info log whenever execution
-// hits that statement. (Unlike with -vmodule, the ".go" must be
-// present.)
-// -v=0
-// Enable V-leveled logging at the specified level.
-// -vmodule=""
-// The syntax of the argument is a comma-separated list of pattern=N,
-// where pattern is a literal file name (minus the ".go" suffix) or
-// "glob" pattern and N is a V level. For instance,
-// -vmodule=gopher*=3
-// sets the V level to 3 in all Go files whose names begin "gopher".
-//
-package klog
-
-import (
- "bufio"
- "bytes"
- "errors"
- "flag"
- "fmt"
- "io"
- stdLog "log"
- "math"
- "os"
- "path/filepath"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// severity identifies the sort of log: info, warning etc. It also implements
-// the flag.Value interface. The -stderrthreshold flag is of type severity and
-// should be modified only through the flag.Value interface. The values match
-// the corresponding constants in C++.
-type severity int32 // sync/atomic int32
-
-// These constants identify the log levels in order of increasing severity.
-// A message written to a high-severity log file is also written to each
-// lower-severity log file.
-const (
- infoLog severity = iota
- warningLog
- errorLog
- fatalLog
- numSeverity = 4
-)
-
-const severityChar = "IWEF"
-
-var severityName = []string{
- infoLog: "INFO",
- warningLog: "WARNING",
- errorLog: "ERROR",
- fatalLog: "FATAL",
-}
-
-// get returns the value of the severity.
-func (s *severity) get() severity {
- return severity(atomic.LoadInt32((*int32)(s)))
-}
-
-// set sets the value of the severity.
-func (s *severity) set(val severity) {
- atomic.StoreInt32((*int32)(s), int32(val))
-}
-
-// String is part of the flag.Value interface.
-func (s *severity) String() string {
- return strconv.FormatInt(int64(*s), 10)
-}
-
-// Get is part of the flag.Value interface.
-func (s *severity) Get() interface{} {
- return *s
-}
-
-// Set is part of the flag.Value interface.
-func (s *severity) Set(value string) error {
- var threshold severity
- // Is it a known name?
- if v, ok := severityByName(value); ok {
- threshold = v
- } else {
- v, err := strconv.ParseInt(value, 10, 32)
- if err != nil {
- return err
- }
- threshold = severity(v)
- }
- logging.stderrThreshold.set(threshold)
- return nil
-}
-
-func severityByName(s string) (severity, bool) {
- s = strings.ToUpper(s)
- for i, name := range severityName {
- if name == s {
- return severity(i), true
- }
- }
- return 0, false
-}
-
-// OutputStats tracks the number of output lines and bytes written.
-type OutputStats struct {
- lines int64
- bytes int64
-}
-
-// Lines returns the number of lines written.
-func (s *OutputStats) Lines() int64 {
- return atomic.LoadInt64(&s.lines)
-}
-
-// Bytes returns the number of bytes written.
-func (s *OutputStats) Bytes() int64 {
- return atomic.LoadInt64(&s.bytes)
-}
-
-// Stats tracks the number of lines of output and number of bytes
-// per severity level. Values must be read with atomic.LoadInt64.
-var Stats struct {
- Info, Warning, Error OutputStats
-}
-
-var severityStats = [numSeverity]*OutputStats{
- infoLog: &Stats.Info,
- warningLog: &Stats.Warning,
- errorLog: &Stats.Error,
-}
-
-// Level is exported because it appears in the arguments to V and is
-// the type of the v flag, which can be set programmatically.
-// It's a distinct type because we want to discriminate it from logType.
-// Variables of type level are only changed under logging.mu.
-// The -v flag is read only with atomic ops, so the state of the logging
-// module is consistent.
-
-// Level is treated as a sync/atomic int32.
-
-// Level specifies a level of verbosity for V logs. *Level implements
-// flag.Value; the -v flag is of type Level and should be modified
-// only through the flag.Value interface.
-type Level int32
-
-// get returns the value of the Level.
-func (l *Level) get() Level {
- return Level(atomic.LoadInt32((*int32)(l)))
-}
-
-// set sets the value of the Level.
-func (l *Level) set(val Level) {
- atomic.StoreInt32((*int32)(l), int32(val))
-}
-
-// String is part of the flag.Value interface.
-func (l *Level) String() string {
- return strconv.FormatInt(int64(*l), 10)
-}
-
-// Get is part of the flag.Value interface.
-func (l *Level) Get() interface{} {
- return *l
-}
-
-// Set is part of the flag.Value interface.
-func (l *Level) Set(value string) error {
- v, err := strconv.ParseInt(value, 10, 32)
- if err != nil {
- return err
- }
- logging.mu.Lock()
- defer logging.mu.Unlock()
- logging.setVState(Level(v), logging.vmodule.filter, false)
- return nil
-}
-
-// moduleSpec represents the setting of the -vmodule flag.
-type moduleSpec struct {
- filter []modulePat
-}
-
-// modulePat contains a filter for the -vmodule flag.
-// It holds a verbosity level and a file pattern to match.
-type modulePat struct {
- pattern string
- literal bool // The pattern is a literal string
- level Level
-}
-
-// match reports whether the file matches the pattern. It uses a string
-// comparison if the pattern contains no metacharacters.
-func (m *modulePat) match(file string) bool {
- if m.literal {
- return file == m.pattern
- }
- match, _ := filepath.Match(m.pattern, file)
- return match
-}
-
-func (m *moduleSpec) String() string {
- // Lock because the type is not atomic. TODO: clean this up.
- logging.mu.Lock()
- defer logging.mu.Unlock()
- var b bytes.Buffer
- for i, f := range m.filter {
- if i > 0 {
- b.WriteRune(',')
- }
- fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
- }
- return b.String()
-}
-
-// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
-// struct is not exported.
-func (m *moduleSpec) Get() interface{} {
- return nil
-}
-
-var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
-
-// Syntax: -vmodule=recordio=2,file=1,gfs*=3
-func (m *moduleSpec) Set(value string) error {
- var filter []modulePat
- for _, pat := range strings.Split(value, ",") {
- if len(pat) == 0 {
- // Empty strings such as from a trailing comma can be ignored.
- continue
- }
- patLev := strings.Split(pat, "=")
- if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
- return errVmoduleSyntax
- }
- pattern := patLev[0]
- v, err := strconv.ParseInt(patLev[1], 10, 32)
- if err != nil {
- return errors.New("syntax error: expect comma-separated list of filename=N")
- }
- if v < 0 {
- return errors.New("negative value for vmodule level")
- }
- if v == 0 {
- continue // Ignore. It's harmless but no point in paying the overhead.
- }
- // TODO: check syntax of filter?
- filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
- }
- logging.mu.Lock()
- defer logging.mu.Unlock()
- logging.setVState(logging.verbosity, filter, true)
- return nil
-}
-
-// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
-// that require filepath.Match to be called to match the pattern.
-func isLiteral(pattern string) bool {
- return !strings.ContainsAny(pattern, `\*?[]`)
-}
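-
-// A small sketch of adjusting vmodule programmatically after the flags have
-// been registered (see InitFlags below); the patterns are illustrative:
-//
-//    flag.Set("v", "1")
-//    flag.Set("vmodule", "recordio=2,gfs*=3")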
-
-// traceLocation represents the setting of the -log_backtrace_at flag.
-type traceLocation struct {
- file string
- line int
-}
-
-// isSet reports whether the trace location has been specified.
-// logging.mu is held.
-func (t *traceLocation) isSet() bool {
- return t.line > 0
-}
-
-// match reports whether the specified file and line matches the trace location.
-// The argument file name is the full path, not the basename specified in the flag.
-// logging.mu is held.
-func (t *traceLocation) match(file string, line int) bool {
- if t.line != line {
- return false
- }
- if i := strings.LastIndex(file, "/"); i >= 0 {
- file = file[i+1:]
- }
- return t.file == file
-}
-
-func (t *traceLocation) String() string {
- // Lock because the type is not atomic. TODO: clean this up.
- logging.mu.Lock()
- defer logging.mu.Unlock()
- return fmt.Sprintf("%s:%d", t.file, t.line)
-}
-
-// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
-// struct is not exported.
-func (t *traceLocation) Get() interface{} {
- return nil
-}
-
-var errTraceSyntax = errors.New("syntax error: expect file.go:234")
-
-// Syntax: -log_backtrace_at=gopherflakes.go:234
-// Note that unlike vmodule the file extension is included here.
-func (t *traceLocation) Set(value string) error {
- if value == "" {
- // Unset.
- t.line = 0
- t.file = ""
- }
- fields := strings.Split(value, ":")
- if len(fields) != 2 {
- return errTraceSyntax
- }
- file, line := fields[0], fields[1]
- if !strings.Contains(file, ".") {
- return errTraceSyntax
- }
- v, err := strconv.Atoi(line)
- if err != nil {
- return errTraceSyntax
- }
- if v <= 0 {
- return errors.New("negative or zero value for level")
- }
- logging.mu.Lock()
- defer logging.mu.Unlock()
- t.line = v
- t.file = file
- return nil
-}
-
-// flushSyncWriter is the interface satisfied by logging destinations.
-type flushSyncWriter interface {
- Flush() error
- Sync() error
- io.Writer
-}
-
-// init sets up the defaults and runs flushDaemon.
-func init() {
- logging.stderrThreshold = errorLog // Default stderrThreshold is ERROR.
- logging.setVState(0, nil, false)
- logging.logDir = ""
- logging.logFile = ""
- logging.logFileMaxSizeMB = 1800
- logging.toStderr = true
- logging.alsoToStderr = false
- logging.skipHeaders = false
- logging.addDirHeader = false
- logging.skipLogHeaders = false
- go logging.flushDaemon()
-}
-
-// InitFlags is for explicitly initializing the flags.
-func InitFlags(flagset *flag.FlagSet) {
- if flagset == nil {
- flagset = flag.CommandLine
- }
-
- flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory")
- flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file")
- flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB,
- "Defines the maximum size a log file can grow to. Unit is megabytes. "+
- "If the value is 0, the maximum file size is unlimited.")
- flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files")
- flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files")
- flagset.Var(&logging.verbosity, "v", "number for the log level verbosity")
- flagset.BoolVar(&logging.skipHeaders, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header")
- flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages")
- flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files")
- flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
- flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
- flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
-}
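-
-// A minimal initialization sketch for a main package, assuming the import
-// path "k8s.io/klog" implied by this file's location; error handling omitted:
-//
-//    klog.InitFlags(nil)             // register on flag.CommandLine
-//    flag.Set("logtostderr", "true") // or pass -logtostderr on the command line
-//    flag.Parse()
-//    defer klog.Flush()
-//    klog.Info("logging initialized")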
-
-// Flush flushes all pending log I/O.
-func Flush() {
- logging.lockAndFlushAll()
-}
-
-// loggingT collects all the global state of the logging setup.
-type loggingT struct {
- // Boolean flags. Not handled atomically because the flag.Value interface
- // does not let us avoid the =true, and that shorthand is necessary for
- // compatibility. TODO: does this matter enough to fix? Seems unlikely.
- toStderr bool // The -logtostderr flag.
- alsoToStderr bool // The -alsologtostderr flag.
-
- // Level flag. Handled atomically.
- stderrThreshold severity // The -stderrthreshold flag.
-
- // freeList is a list of byte buffers, maintained under freeListMu.
- freeList *buffer
- // freeListMu maintains the free list. It is separate from the main mutex
- // so buffers can be grabbed and printed to without holding the main lock,
- // for better parallelization.
- freeListMu sync.Mutex
-
- // mu protects the remaining elements of this structure and is
- // used to synchronize logging.
- mu sync.Mutex
- // file holds writer for each of the log types.
- file [numSeverity]flushSyncWriter
- // pcs is used in V to avoid an allocation when computing the caller's PC.
- pcs [1]uintptr
- // vmap is a cache of the V Level for each V() call site, identified by PC.
- // It is wiped whenever the vmodule flag changes state.
- vmap map[uintptr]Level
- // filterLength stores the length of the vmodule filter chain. If greater
- // than zero, it means vmodule is enabled. It may be read safely
- // using atomic.LoadInt32, but is only modified under mu.
- filterLength int32
- // traceLocation is the state of the -log_backtrace_at flag.
- traceLocation traceLocation
- // These flags are modified only under lock, although verbosity may be fetched
- // safely using atomic.LoadInt32.
- vmodule moduleSpec // The state of the -vmodule flag.
- verbosity Level // V logging level, the value of the -v flag.
-
- // If non-empty, overrides the choice of directory in which to write logs.
- // See createLogDirs for the full list of possible destinations.
- logDir string
-
- // If non-empty, specifies the path of the file to write logs to. Mutually exclusive
- // with the log_dir option.
- logFile string
-
- // When logFile is specified, this limiter makes sure the logFile won't exceed a certain size. When it does, the
- // logFile will be cleaned up. If this value is 0, no size limitation will be applied to logFile.
- logFileMaxSizeMB uint64
-
- // If true, do not add the prefix headers, useful when used with SetOutput
- skipHeaders bool
-
- // If true, do not add the headers to log files
- skipLogHeaders bool
-
- // If true, add the file directory to the header
- addDirHeader bool
-}
-
-// buffer holds a byte Buffer for reuse. The zero value is ready for use.
-type buffer struct {
- bytes.Buffer
- tmp [64]byte // temporary byte array for creating headers.
- next *buffer
-}
-
-var logging loggingT
-
-// setVState sets a consistent state for V logging.
-// l.mu is held.
-func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
- // Turn verbosity off so V will not fire while we are in transition.
- logging.verbosity.set(0)
- // Ditto for filter length.
- atomic.StoreInt32(&logging.filterLength, 0)
-
- // Set the new filters and wipe the pc->Level map if the filter has changed.
- if setFilter {
- logging.vmodule.filter = filter
- logging.vmap = make(map[uintptr]Level)
- }
-
- // Things are consistent now, so enable filtering and verbosity.
- // They are enabled in order opposite to that in V.
- atomic.StoreInt32(&logging.filterLength, int32(len(filter)))
- logging.verbosity.set(verbosity)
-}
-
-// getBuffer returns a new, ready-to-use buffer.
-func (l *loggingT) getBuffer() *buffer {
- l.freeListMu.Lock()
- b := l.freeList
- if b != nil {
- l.freeList = b.next
- }
- l.freeListMu.Unlock()
- if b == nil {
- b = new(buffer)
- } else {
- b.next = nil
- b.Reset()
- }
- return b
-}
-
-// putBuffer returns a buffer to the free list.
-func (l *loggingT) putBuffer(b *buffer) {
- if b.Len() >= 256 {
- // Let big buffers die a natural death.
- return
- }
- l.freeListMu.Lock()
- b.next = l.freeList
- l.freeList = b
- l.freeListMu.Unlock()
-}
-
-var timeNow = time.Now // Stubbed out for testing.
-
-/*
-header formats a log header as defined by the C++ implementation.
-It returns a buffer containing the formatted header and the user's file and line number.
-The depth specifies how many stack frames above this call the source line to be identified in the log message lives.
-
-Log lines have this form:
- Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
-where the fields are defined as follows:
- L A single character, representing the log level (eg 'I' for INFO)
- mm The month (zero padded; ie May is '05')
- dd The day (zero padded)
- hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds
- threadid The space-padded thread ID as returned by GetTID()
- file The file name
- line The line number
- msg The user-supplied message
-*/
-func (l *loggingT) header(s severity, depth int) (*buffer, string, int) {
- _, file, line, ok := runtime.Caller(3 + depth)
- if !ok {
- file = "???"
- line = 1
- } else {
- if slash := strings.LastIndex(file, "/"); slash >= 0 {
- path := file
- file = path[slash+1:]
- if l.addDirHeader {
- if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 {
- file = path[dirsep+1:]
- }
- }
- }
- }
- return l.formatHeader(s, file, line), file, line
-}
-
-// formatHeader formats a log header using the provided file name and line number.
-func (l *loggingT) formatHeader(s severity, file string, line int) *buffer {
- now := timeNow()
- if line < 0 {
- line = 0 // not a real line number, but acceptable to someDigits
- }
- if s > fatalLog {
- s = infoLog // for safety.
- }
- buf := l.getBuffer()
- if l.skipHeaders {
- return buf
- }
-
- // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
- // It's worth about 3X. Fprintf is hard.
- _, month, day := now.Date()
- hour, minute, second := now.Clock()
- // Lmmdd hh:mm:ss.uuuuuu threadid file:line]
- buf.tmp[0] = severityChar[s]
- buf.twoDigits(1, int(month))
- buf.twoDigits(3, day)
- buf.tmp[5] = ' '
- buf.twoDigits(6, hour)
- buf.tmp[8] = ':'
- buf.twoDigits(9, minute)
- buf.tmp[11] = ':'
- buf.twoDigits(12, second)
- buf.tmp[14] = '.'
- buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
- buf.tmp[21] = ' '
- buf.nDigits(7, 22, pid, ' ') // TODO: should be TID
- buf.tmp[29] = ' '
- buf.Write(buf.tmp[:30])
- buf.WriteString(file)
- buf.tmp[0] = ':'
- n := buf.someDigits(1, line)
- buf.tmp[n+1] = ']'
- buf.tmp[n+2] = ' '
- buf.Write(buf.tmp[:n+3])
- return buf
-}
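-
-// For illustration, a header produced by the layout above looks like this
-// (all values made up), followed by the message:
-//
-//    I0301 14:05:02.123456    4321 main.go:27] starting server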
-
-// Some custom tiny helper functions to print the log header efficiently.
-
-const digits = "0123456789"
-
-// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i].
-func (buf *buffer) twoDigits(i, d int) {
- buf.tmp[i+1] = digits[d%10]
- d /= 10
- buf.tmp[i] = digits[d%10]
-}
-
-// nDigits formats an n-digit integer at buf.tmp[i],
-// padding with pad on the left.
-// It assumes d >= 0.
-func (buf *buffer) nDigits(n, i, d int, pad byte) {
- j := n - 1
- for ; j >= 0 && d > 0; j-- {
- buf.tmp[i+j] = digits[d%10]
- d /= 10
- }
- for ; j >= 0; j-- {
- buf.tmp[i+j] = pad
- }
-}
-
-// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i].
-func (buf *buffer) someDigits(i, d int) int {
- // Print into the top, then copy down. We know there's space for at least
- // a 10-digit number.
- j := len(buf.tmp)
- for {
- j--
- buf.tmp[j] = digits[d%10]
- d /= 10
- if d == 0 {
- break
- }
- }
- return copy(buf.tmp[i:], buf.tmp[j:])
-}
-
-func (l *loggingT) println(s severity, args ...interface{}) {
- buf, file, line := l.header(s, 0)
- fmt.Fprintln(buf, args...)
- l.output(s, buf, file, line, false)
-}
-
-func (l *loggingT) print(s severity, args ...interface{}) {
- l.printDepth(s, 1, args...)
-}
-
-func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) {
- buf, file, line := l.header(s, depth)
- fmt.Fprint(buf, args...)
- if buf.Bytes()[buf.Len()-1] != '\n' {
- buf.WriteByte('\n')
- }
- l.output(s, buf, file, line, false)
-}
-
-func (l *loggingT) printf(s severity, format string, args ...interface{}) {
- buf, file, line := l.header(s, 0)
- fmt.Fprintf(buf, format, args...)
- if buf.Bytes()[buf.Len()-1] != '\n' {
- buf.WriteByte('\n')
- }
- l.output(s, buf, file, line, false)
-}
-
-// printWithFileLine behaves like print but uses the provided file and line number. If
-// alsoToStderr is true, the log message always appears on standard error; it
-// will also appear in the log file unless --logtostderr is set.
-func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) {
- buf := l.formatHeader(s, file, line)
- fmt.Fprint(buf, args...)
- if buf.Bytes()[buf.Len()-1] != '\n' {
- buf.WriteByte('\n')
- }
- l.output(s, buf, file, line, alsoToStderr)
-}
-
-// redirectBuffer is used to set an alternate destination for the logs
-type redirectBuffer struct {
- w io.Writer
-}
-
-func (rb *redirectBuffer) Sync() error {
- return nil
-}
-
-func (rb *redirectBuffer) Flush() error {
- return nil
-}
-
-func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) {
- return rb.w.Write(bytes)
-}
-
-// SetOutput sets the output destination for all severities
-func SetOutput(w io.Writer) {
- logging.mu.Lock()
- defer logging.mu.Unlock()
- for s := fatalLog; s >= infoLog; s-- {
- rb := &redirectBuffer{
- w: w,
- }
- logging.file[s] = rb
- }
-}
-
-// SetOutputBySeverity sets the output destination for a specific severity
-func SetOutputBySeverity(name string, w io.Writer) {
- logging.mu.Lock()
- defer logging.mu.Unlock()
- sev, ok := severityByName(name)
- if !ok {
- panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name))
- }
- rb := &redirectBuffer{
- w: w,
- }
- logging.file[sev] = rb
-}
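-
-// A usage sketch, e.g. capturing INFO output in a test; the buffer name is
-// illustrative, and the writer is only consulted when not logging to stderr:
-//
-//    flag.Set("logtostderr", "false")
-//    var buf bytes.Buffer
-//    klog.SetOutputBySeverity("INFO", &buf)
-//    klog.Info("captured")
-//    klog.Flush()
-//    // buf.String() now holds the formatted INFO line.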
-
-// output writes the data to the log files and releases the buffer.
-func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) {
- l.mu.Lock()
- if l.traceLocation.isSet() {
- if l.traceLocation.match(file, line) {
- buf.Write(stacks(false))
- }
- }
- data := buf.Bytes()
- if l.toStderr {
- os.Stderr.Write(data)
- } else {
- if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
- os.Stderr.Write(data)
- }
-
- if logging.logFile != "" {
- // Since we are using a single log file, all of the items in l.file array
- // will point to the same file, so just use one of them to write data.
- if l.file[infoLog] == nil {
- if err := l.createFiles(infoLog); err != nil {
- os.Stderr.Write(data) // Make sure the message appears somewhere.
- l.exit(err)
- }
- }
- l.file[infoLog].Write(data)
- } else {
- if l.file[s] == nil {
- if err := l.createFiles(s); err != nil {
- os.Stderr.Write(data) // Make sure the message appears somewhere.
- l.exit(err)
- }
- }
-
- switch s {
- case fatalLog:
- l.file[fatalLog].Write(data)
- fallthrough
- case errorLog:
- l.file[errorLog].Write(data)
- fallthrough
- case warningLog:
- l.file[warningLog].Write(data)
- fallthrough
- case infoLog:
- l.file[infoLog].Write(data)
- }
- }
- }
- if s == fatalLog {
- // If we got here via Exit rather than Fatal, print no stacks.
- if atomic.LoadUint32(&fatalNoStacks) > 0 {
- l.mu.Unlock()
- timeoutFlush(10 * time.Second)
- os.Exit(1)
- }
- // Dump all goroutine stacks before exiting.
- // First, make sure we see the trace for the current goroutine on standard error.
- // If -logtostderr has been specified, the loop below will do that anyway
- // as the first stack in the full dump.
- if !l.toStderr {
- os.Stderr.Write(stacks(false))
- }
- // Write the stack trace for all goroutines to the files.
- trace := stacks(true)
- logExitFunc = func(error) {} // If we get a write error, we'll still exit below.
- for log := fatalLog; log >= infoLog; log-- {
- if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.
- f.Write(trace)
- }
- }
- l.mu.Unlock()
- timeoutFlush(10 * time.Second)
- os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
- }
- l.putBuffer(buf)
- l.mu.Unlock()
- if stats := severityStats[s]; stats != nil {
- atomic.AddInt64(&stats.lines, 1)
- atomic.AddInt64(&stats.bytes, int64(len(data)))
- }
-}
-
-// timeoutFlush calls Flush and returns when it completes or after timeout
-// elapses, whichever happens first. This is needed because the hooks invoked
-// by Flush may deadlock when klog.Fatal is called from a hook that holds
-// a lock.
-func timeoutFlush(timeout time.Duration) {
- done := make(chan bool, 1)
- go func() {
- Flush() // calls logging.lockAndFlushAll()
- done <- true
- }()
- select {
- case <-done:
- case <-time.After(timeout):
- fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout)
- }
-}
-
-// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines.
-func stacks(all bool) []byte {
- // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
- n := 10000
- if all {
- n = 100000
- }
- var trace []byte
- for i := 0; i < 5; i++ {
- trace = make([]byte, n)
- nbytes := runtime.Stack(trace, all)
- if nbytes < len(trace) {
- return trace[:nbytes]
- }
- n *= 2
- }
- return trace
-}
-
-// logExitFunc provides a simple mechanism to override the default behavior
-// of exiting on error. Used in testing and to guarantee we reach a required exit
-// for fatal logs. Instead, exit could be a function rather than a method but that
-// would make its use clumsier.
-var logExitFunc func(error)
-
-// exit is called if there is trouble creating or writing log files.
-// It flushes the logs and exits the program; there's no point in hanging around.
-// l.mu is held.
-func (l *loggingT) exit(err error) {
- fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err)
- // If logExitFunc is set, we do that instead of exiting.
- if logExitFunc != nil {
- logExitFunc(err)
- return
- }
- l.flushAll()
- os.Exit(2)
-}
-
-// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
-// file's Sync method and providing a wrapper for the Write method that provides log
-// file rotation. There are conflicting methods, so the file cannot be embedded.
-// l.mu is held for all its methods.
-type syncBuffer struct {
- logger *loggingT
- *bufio.Writer
- file *os.File
- sev severity
- nbytes uint64 // The number of bytes written to this file
- maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up.
-}
-
-func (sb *syncBuffer) Sync() error {
- return sb.file.Sync()
-}
-
-// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options.
-func CalculateMaxSize() uint64 {
- if logging.logFile != "" {
- if logging.logFileMaxSizeMB == 0 {
- // If logFileMaxSizeMB is zero, we don't have limitations on the log size.
- return math.MaxUint64
- }
- // Flag logFileMaxSizeMB is in MB for user convenience.
- return logging.logFileMaxSizeMB * 1024 * 1024
- }
- // If "log_file" flag is not specified, the target file (sb.file) will be cleaned up when reaches a fixed size.
- return MaxSize
-}
-
-func (sb *syncBuffer) Write(p []byte) (n int, err error) {
- if sb.nbytes+uint64(len(p)) >= sb.maxbytes {
- if err := sb.rotateFile(time.Now(), false); err != nil {
- sb.logger.exit(err)
- }
- }
- n, err = sb.Writer.Write(p)
- sb.nbytes += uint64(n)
- if err != nil {
- sb.logger.exit(err)
- }
- return
-}
-
-// rotateFile closes the syncBuffer's file and starts a new one.
-// The startup argument indicates whether this is the initial startup of klog.
-// If startup is true, existing files are opened for appending instead of truncated.
-func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error {
- if sb.file != nil {
- sb.Flush()
- sb.file.Close()
- }
- var err error
- sb.file, _, err = create(severityName[sb.sev], now, startup)
- sb.nbytes = 0
- if err != nil {
- return err
- }
-
- sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
-
- if sb.logger.skipLogHeaders {
- return nil
- }
-
- // Write header.
- var buf bytes.Buffer
- fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
- fmt.Fprintf(&buf, "Running on machine: %s\n", host)
- fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
- fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
- n, err := sb.file.Write(buf.Bytes())
- sb.nbytes += uint64(n)
- return err
-}
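-
-// For illustration, the header written to a freshly created log file looks
-// like this (machine and toolchain values made up):
-//
-//    Log file created at: 2021/03/01 14:05:02
-//    Running on machine: buildhost
-//    Binary: Built with gc go1.16 for linux/amd64
-//    Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg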
-
-// bufferSize sizes the buffer associated with each log file. It's large
-// so that log records can accumulate without the logging thread blocking
-// on disk I/O. The flushDaemon will block instead.
-const bufferSize = 256 * 1024
-
-// createFiles creates all the log files for severity from sev down to infoLog.
-// l.mu is held.
-func (l *loggingT) createFiles(sev severity) error {
- now := time.Now()
- // Files are created in decreasing severity order, so as soon as we find one
- // has already been created, we can stop.
- for s := sev; s >= infoLog && l.file[s] == nil; s-- {
- sb := &syncBuffer{
- logger: l,
- sev: s,
- maxbytes: CalculateMaxSize(),
- }
- if err := sb.rotateFile(now, true); err != nil {
- return err
- }
- l.file[s] = sb
- }
- return nil
-}
-
-const flushInterval = 5 * time.Second
-
-// flushDaemon periodically flushes the log file buffers.
-func (l *loggingT) flushDaemon() {
- for range time.NewTicker(flushInterval).C {
- l.lockAndFlushAll()
- }
-}
-
-// lockAndFlushAll is like flushAll but locks l.mu first.
-func (l *loggingT) lockAndFlushAll() {
- l.mu.Lock()
- l.flushAll()
- l.mu.Unlock()
-}
-
-// flushAll flushes all the logs and attempts to "sync" their data to disk.
-// l.mu is held.
-func (l *loggingT) flushAll() {
- // Flush from fatal down, in case there's trouble flushing.
- for s := fatalLog; s >= infoLog; s-- {
- file := l.file[s]
- if file != nil {
- file.Flush() // ignore error
- file.Sync() // ignore error
- }
- }
-}
-
-// CopyStandardLogTo arranges for messages written to the Go "log" package's
-// default logs to also appear in the Google logs for the named and lower
-// severities. Subsequent changes to the standard log's default output location
-// or format may break this behavior.
-//
-// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not
-// recognized, CopyStandardLogTo panics.
-func CopyStandardLogTo(name string) {
- sev, ok := severityByName(name)
- if !ok {
- panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name))
- }
- // Set a log format that captures the user's file and line:
- // d.go:23: message
- stdLog.SetFlags(stdLog.Lshortfile)
- stdLog.SetOutput(logBridge(sev))
-}
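-
-// A short sketch of bridging the standard library logger into klog; "INFO"
-// must be one of the severity names accepted by severityByName:
-//
-//    klog.CopyStandardLogTo("INFO")
-//    stdLog.Print("now routed through klog") // stdLog is the aliased standard "log" package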
-
-// logBridge provides the Write method that enables CopyStandardLogTo to connect
-// Go's standard logs to the logs provided by this package.
-type logBridge severity
-
-// Write parses the standard logging line and passes its components to the
-// logger for severity(lb).
-func (lb logBridge) Write(b []byte) (n int, err error) {
- var (
- file = "???"
- line = 1
- text string
- )
- // Split "d.go:23: message" into "d.go", "23", and "message".
- if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
- text = fmt.Sprintf("bad log format: %s", b)
- } else {
- file = string(parts[0])
- text = string(parts[2][1:]) // skip leading space
- line, err = strconv.Atoi(string(parts[1]))
- if err != nil {
- text = fmt.Sprintf("bad line number: %s", b)
- line = 1
- }
- }
- // printWithFileLine with alsoToStderr=true, so standard log messages
- // always appear on standard error.
- logging.printWithFileLine(severity(lb), file, line, true, text)
- return len(b), nil
-}
-
-// setV computes and remembers the V level for a given PC
-// when vmodule is enabled.
-// File pattern matching takes the basename of the file, stripped
-// of its .go suffix, and uses filepath.Match, which is a little more
-// general than the *? matching used in C++.
-// l.mu is held.
-func (l *loggingT) setV(pc uintptr) Level {
- fn := runtime.FuncForPC(pc)
- file, _ := fn.FileLine(pc)
- // The file is something like /a/b/c/d.go. We want just the d.
- if strings.HasSuffix(file, ".go") {
- file = file[:len(file)-3]
- }
- if slash := strings.LastIndex(file, "/"); slash >= 0 {
- file = file[slash+1:]
- }
- for _, filter := range l.vmodule.filter {
- if filter.match(file) {
- l.vmap[pc] = filter.level
- return filter.level
- }
- }
- l.vmap[pc] = 0
- return 0
-}
-
-// Verbose is a boolean type that implements Infof (like Printf) etc.
-// See the documentation of V for more information.
-type Verbose bool
-
-// V reports whether verbosity at the call site is at least the requested level.
-// The returned value is a boolean of type Verbose, which implements Info, Infoln
-// and Infof. These methods will write to the Info log if called.
-// Thus, one may write either
-// if klog.V(2) { klog.Info("log this") }
-// or
-// klog.V(2).Info("log this")
-// The second form is shorter but the first is cheaper if logging is off because it does
-// not evaluate its arguments.
-//
-// Whether an individual call to V generates a log record depends on the setting of
-// the -v and --vmodule flags; both are off by default. If the level in the call to
-// V is at least the value of -v, or of -vmodule for the source file containing the
-// call, the V call will log.
-func V(level Level) Verbose {
- // This function tries hard to be cheap unless there's work to do.
- // The fast path is two atomic loads and compares.
-
- // Here is a cheap but safe test to see if V logging is enabled globally.
- if logging.verbosity.get() >= level {
- return Verbose(true)
- }
-
- // It's off globally but vmodule may still be set.
- // Here is another cheap but safe test to see if vmodule is enabled.
- if atomic.LoadInt32(&logging.filterLength) > 0 {
- // Now we need a proper lock to use the logging structure. The pcs field
- // is shared so we must lock before accessing it. This is fairly expensive,
- // but if V logging is enabled we're slow anyway.
- logging.mu.Lock()
- defer logging.mu.Unlock()
- if runtime.Callers(2, logging.pcs[:]) == 0 {
- return Verbose(false)
- }
- v, ok := logging.vmap[logging.pcs[0]]
- if !ok {
- v = logging.setV(logging.pcs[0])
- }
- return Verbose(v >= level)
- }
- return Verbose(false)
-}
-
-// Info is equivalent to the global Info function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) Info(args ...interface{}) {
- if v {
- logging.print(infoLog, args...)
- }
-}
-
-// Infoln is equivalent to the global Infoln function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) Infoln(args ...interface{}) {
- if v {
- logging.println(infoLog, args...)
- }
-}
-
-// Infof is equivalent to the global Infof function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) Infof(format string, args ...interface{}) {
- if v {
- logging.printf(infoLog, format, args...)
- }
-}
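-
-// A usage sketch of the guarded form described above; buildExpensiveSummary
-// is a hypothetical, costly call that should only run when -v=3 is in effect:
-//
-//    if klog.V(3) {
-//        klog.Infof("state: %s", buildExpensiveSummary())
-//    }
-//    klog.V(3).Info("cheap message")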
-
-// Info logs to the INFO log.
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Info(args ...interface{}) {
- logging.print(infoLog, args...)
-}
-
-// InfoDepth acts as Info but uses depth to determine which call frame to log.
-// InfoDepth(0, "msg") is the same as Info("msg").
-func InfoDepth(depth int, args ...interface{}) {
- logging.printDepth(infoLog, depth, args...)
-}
-
-// Infoln logs to the INFO log.
-// Arguments are handled in the manner of fmt.Println; a newline is always appended.
-func Infoln(args ...interface{}) {
- logging.println(infoLog, args...)
-}
-
-// Infof logs to the INFO log.
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Infof(format string, args ...interface{}) {
- logging.printf(infoLog, format, args...)
-}
-
-// Warning logs to the WARNING and INFO logs.
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Warning(args ...interface{}) {
- logging.print(warningLog, args...)
-}
-
-// WarningDepth acts as Warning but uses depth to determine which call frame to log.
-// WarningDepth(0, "msg") is the same as Warning("msg").
-func WarningDepth(depth int, args ...interface{}) {
- logging.printDepth(warningLog, depth, args...)
-}
-
-// Warningln logs to the WARNING and INFO logs.
-// Arguments are handled in the manner of fmt.Println; a newline is always appended.
-func Warningln(args ...interface{}) {
- logging.println(warningLog, args...)
-}
-
-// Warningf logs to the WARNING and INFO logs.
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Warningf(format string, args ...interface{}) {
- logging.printf(warningLog, format, args...)
-}
-
-// Error logs to the ERROR, WARNING, and INFO logs.
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Error(args ...interface{}) {
- logging.print(errorLog, args...)
-}
-
-// ErrorDepth acts as Error but uses depth to determine which call frame to log.
-// ErrorDepth(0, "msg") is the same as Error("msg").
-func ErrorDepth(depth int, args ...interface{}) {
- logging.printDepth(errorLog, depth, args...)
-}
-
-// Errorln logs to the ERROR, WARNING, and INFO logs.
-// Arguments are handled in the manner of fmt.Println; a newline is always appended.
-func Errorln(args ...interface{}) {
- logging.println(errorLog, args...)
-}
-
-// Errorf logs to the ERROR, WARNING, and INFO logs.
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Errorf(format string, args ...interface{}) {
- logging.printf(errorLog, format, args...)
-}
-
-// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
-// including a stack trace of all running goroutines, then calls os.Exit(255).
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Fatal(args ...interface{}) {
- logging.print(fatalLog, args...)
-}
-
-// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
-// FatalDepth(0, "msg") is the same as Fatal("msg").
-func FatalDepth(depth int, args ...interface{}) {
- logging.printDepth(fatalLog, depth, args...)
-}
-
-// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
-// including a stack trace of all running goroutines, then calls os.Exit(255).
-// Arguments are handled in the manner of fmt.Println; a newline is always appended.
-func Fatalln(args ...interface{}) {
- logging.println(fatalLog, args...)
-}
-
-// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
-// including a stack trace of all running goroutines, then calls os.Exit(255).
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Fatalf(format string, args ...interface{}) {
- logging.printf(fatalLog, format, args...)
-}
-
-// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.
-// It allows Exit and relatives to use the Fatal logs.
-var fatalNoStacks uint32
-
-// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Exit(args ...interface{}) {
- atomic.StoreUint32(&fatalNoStacks, 1)
- logging.print(fatalLog, args...)
-}
-
-// ExitDepth acts as Exit but uses depth to determine which call frame to log.
-// ExitDepth(0, "msg") is the same as Exit("msg").
-func ExitDepth(depth int, args ...interface{}) {
- atomic.StoreUint32(&fatalNoStacks, 1)
- logging.printDepth(fatalLog, depth, args...)
-}
-
-// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
-func Exitln(args ...interface{}) {
- atomic.StoreUint32(&fatalNoStacks, 1)
- logging.println(fatalLog, args...)
-}
-
-// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Exitf(format string, args ...interface{}) {
- atomic.StoreUint32(&fatalNoStacks, 1)
- logging.printf(fatalLog, format, args...)
-}
diff --git a/vendor/k8s.io/klog/klog_file.go b/vendor/k8s.io/klog/klog_file.go
deleted file mode 100644
index e4010ad4d..000000000
--- a/vendor/k8s.io/klog/klog_file.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
-//
-// Copyright 2013 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// File I/O for logs.
-
-package klog
-
-import (
- "errors"
- "fmt"
- "os"
- "os/user"
- "path/filepath"
- "strings"
- "sync"
- "time"
-)
-
-// MaxSize is the maximum size of a log file in bytes.
-var MaxSize uint64 = 1024 * 1024 * 1800
-
-// logDirs lists the candidate directories for new log files.
-var logDirs []string
-
-func createLogDirs() {
- if logging.logDir != "" {
- logDirs = append(logDirs, logging.logDir)
- }
- logDirs = append(logDirs, os.TempDir())
-}
-
-var (
- pid = os.Getpid()
- program = filepath.Base(os.Args[0])
- host = "unknownhost"
- userName = "unknownuser"
-)
-
-func init() {
- h, err := os.Hostname()
- if err == nil {
- host = shortHostname(h)
- }
-
- current, err := user.Current()
- if err == nil {
- userName = current.Username
- }
-
- // Sanitize userName since it may contain filepath separators on Windows.
- userName = strings.Replace(userName, `\`, "_", -1)
-}
-
-// shortHostname returns its argument, truncating at the first period.
-// For instance, given "www.google.com" it returns "www".
-func shortHostname(hostname string) string {
- if i := strings.Index(hostname, "."); i >= 0 {
- return hostname[:i]
- }
- return hostname
-}
-
-// logName returns a new log file name containing tag, with start time t, and
-// the name for the symlink for tag.
-func logName(tag string, t time.Time) (name, link string) {
- name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
- program,
- host,
- userName,
- tag,
- t.Year(),
- t.Month(),
- t.Day(),
- t.Hour(),
- t.Minute(),
- t.Second(),
- pid)
- return name, program + "." + tag
-}
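-
-// For illustration, with program "podman", host "buildhost", user "root",
-// tag "INFO", pid 4321 and t of 2021-03-01 14:05:02 (all values made up),
-// logName returns:
-//
-//    name: podman.buildhost.root.log.INFO.20210301-140502.4321
-//    link: podman.INFO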
-
-var onceLogDirs sync.Once
-
-// create creates a new log file and returns the file and its filename, which
-// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
-// successfully, create also attempts to update the symlink for that tag, ignoring
-// errors.
-// The startup argument indicates whether this is the initial startup of klog.
-// If startup is true, existing files are opened for appending instead of truncated.
-func create(tag string, t time.Time, startup bool) (f *os.File, filename string, err error) {
- if logging.logFile != "" {
- f, err := openOrCreate(logging.logFile, startup)
- if err == nil {
- return f, logging.logFile, nil
- }
- return nil, "", fmt.Errorf("log: unable to create log: %v", err)
- }
- onceLogDirs.Do(createLogDirs)
- if len(logDirs) == 0 {
- return nil, "", errors.New("log: no log dirs")
- }
- name, link := logName(tag, t)
- var lastErr error
- for _, dir := range logDirs {
- fname := filepath.Join(dir, name)
- f, err := openOrCreate(fname, startup)
- if err == nil {
- symlink := filepath.Join(dir, link)
- os.Remove(symlink) // ignore err
- os.Symlink(name, symlink) // ignore err
- return f, fname, nil
- }
- lastErr = err
- }
- return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
-}
-
-// The startup argument indicates whether this is the initial startup of klog.
-// If startup is true, existing files are opened for appending instead of truncated.
-func openOrCreate(name string, startup bool) (*os.File, error) {
- if startup {
- f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
- return f, err
- }
- f, err := os.Create(name)
- return f, err
-}
diff --git a/vendor/k8s.io/utils/LICENSE b/vendor/k8s.io/utils/LICENSE
deleted file mode 100644
index d64569567..000000000
--- a/vendor/k8s.io/utils/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/k8s.io/utils/integer/integer.go b/vendor/k8s.io/utils/integer/integer.go
deleted file mode 100644
index e4e740cad..000000000
--- a/vendor/k8s.io/utils/integer/integer.go
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package integer
-
-// IntMax returns the maximum of the params
-func IntMax(a, b int) int {
- if b > a {
- return b
- }
- return a
-}
-
-// IntMin returns the minimum of the params
-func IntMin(a, b int) int {
- if b < a {
- return b
- }
- return a
-}
-
-// Int32Max returns the maximum of the params
-func Int32Max(a, b int32) int32 {
- if b > a {
- return b
- }
- return a
-}
-
-// Int32Min returns the minimum of the params
-func Int32Min(a, b int32) int32 {
- if b < a {
- return b
- }
- return a
-}
-
-// Int64Max returns the maximum of the params
-func Int64Max(a, b int64) int64 {
- if b > a {
- return b
- }
- return a
-}
-
-// Int64Min returns the minimum of the params
-func Int64Min(a, b int64) int64 {
- if b < a {
- return b
- }
- return a
-}
-
-// RoundToInt32 rounds a float64 to the nearest int32, rounding halves away from zero.
-func RoundToInt32(a float64) int32 {
- if a < 0 {
- return int32(a - 0.5)
- }
- return int32(a + 0.5)
-}
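-
-// Illustrative values: RoundToInt32 rounds halves away from zero, e.g.
-//
-//    RoundToInt32(2.4)  == 2
-//    RoundToInt32(2.5)  == 3
-//    RoundToInt32(-2.5) == -3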
diff --git a/vendor/modules.txt b/vendor/modules.txt
index c323f09fc..9d0d9b996 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -89,7 +89,7 @@ github.com/containers/buildah/pkg/parse
github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/supplemented
github.com/containers/buildah/util
-# github.com/containers/common v0.34.3-0.20210208115708-8668c76dd577
+# github.com/containers/common v0.35.0
github.com/containers/common/pkg/apparmor
github.com/containers/common/pkg/apparmor/internal/supported
github.com/containers/common/pkg/auth
@@ -302,9 +302,6 @@ github.com/docker/go-units
github.com/docker/libnetwork/resolvconf
github.com/docker/libnetwork/resolvconf/dns
github.com/docker/libnetwork/types
-# github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96
-github.com/docker/spdystream
-github.com/docker/spdystream/spdy
# github.com/fsnotify/fsnotify v1.4.9
github.com/fsnotify/fsnotify
# github.com/fsouza/go-dockerclient v1.6.6
@@ -634,7 +631,6 @@ golang.org/x/crypto/ssh/knownhosts
golang.org/x/crypto/ssh/terminal
# golang.org/x/net v0.0.0-20210119194325-5f4716e94777
golang.org/x/net/context
-golang.org/x/net/context/ctxhttp
golang.org/x/net/html
golang.org/x/net/html/atom
golang.org/x/net/html/charset
@@ -646,9 +642,6 @@ golang.org/x/net/internal/socks
golang.org/x/net/internal/timeseries
golang.org/x/net/proxy
golang.org/x/net/trace
-# golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
-golang.org/x/oauth2
-golang.org/x/oauth2/internal
# golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
@@ -681,18 +674,8 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
-# golang.org/x/time v0.0.0-20191024005414-555d28b269f0
-golang.org/x/time/rate
# golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e
golang.org/x/tools/go/ast/inspector
-# google.golang.org/appengine v1.6.6
-google.golang.org/appengine/internal
-google.golang.org/appengine/internal/base
-google.golang.org/appengine/internal/datastore
-google.golang.org/appengine/internal/log
-google.golang.org/appengine/internal/remote_api
-google.golang.org/appengine/internal/urlfetch
-google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.33.2
@@ -784,69 +767,30 @@ gopkg.in/yaml.v3
k8s.io/api/apps/v1
k8s.io/api/core/v1
# k8s.io/apimachinery v0.20.4
-k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/resource
k8s.io/apimachinery/pkg/apis/meta/v1
-k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
k8s.io/apimachinery/pkg/conversion
k8s.io/apimachinery/pkg/conversion/queryparams
k8s.io/apimachinery/pkg/fields
k8s.io/apimachinery/pkg/labels
k8s.io/apimachinery/pkg/runtime
k8s.io/apimachinery/pkg/runtime/schema
-k8s.io/apimachinery/pkg/runtime/serializer
-k8s.io/apimachinery/pkg/runtime/serializer/json
-k8s.io/apimachinery/pkg/runtime/serializer/protobuf
-k8s.io/apimachinery/pkg/runtime/serializer/recognizer
-k8s.io/apimachinery/pkg/runtime/serializer/streaming
-k8s.io/apimachinery/pkg/runtime/serializer/versioning
k8s.io/apimachinery/pkg/selection
k8s.io/apimachinery/pkg/types
-k8s.io/apimachinery/pkg/util/clock
k8s.io/apimachinery/pkg/util/errors
-k8s.io/apimachinery/pkg/util/framer
-k8s.io/apimachinery/pkg/util/httpstream
-k8s.io/apimachinery/pkg/util/httpstream/spdy
k8s.io/apimachinery/pkg/util/intstr
k8s.io/apimachinery/pkg/util/json
k8s.io/apimachinery/pkg/util/naming
k8s.io/apimachinery/pkg/util/net
-k8s.io/apimachinery/pkg/util/remotecommand
k8s.io/apimachinery/pkg/util/runtime
k8s.io/apimachinery/pkg/util/sets
k8s.io/apimachinery/pkg/util/validation
k8s.io/apimachinery/pkg/util/validation/field
-k8s.io/apimachinery/pkg/util/yaml
-k8s.io/apimachinery/pkg/version
k8s.io/apimachinery/pkg/watch
-k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/client-go v0.0.0-20190620085101-78d2af792bab
-k8s.io/client-go/pkg/apis/clientauthentication
-k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1
-k8s.io/client-go/pkg/apis/clientauthentication/v1beta1
-k8s.io/client-go/pkg/version
-k8s.io/client-go/plugin/pkg/client/auth/exec
-k8s.io/client-go/rest
-k8s.io/client-go/rest/watch
-k8s.io/client-go/tools/clientcmd/api
-k8s.io/client-go/tools/metrics
-k8s.io/client-go/tools/remotecommand
-k8s.io/client-go/transport
-k8s.io/client-go/transport/spdy
-k8s.io/client-go/util/cert
-k8s.io/client-go/util/connrotation
-k8s.io/client-go/util/exec
-k8s.io/client-go/util/flowcontrol
k8s.io/client-go/util/homedir
-k8s.io/client-go/util/keyutil
-# k8s.io/klog v1.0.0
-k8s.io/klog
# k8s.io/klog/v2 v2.4.0
k8s.io/klog/v2
-# k8s.io/utils v0.0.0-20190221042446-c2654d5206da
-k8s.io/utils/integer
# sigs.k8s.io/structured-merge-diff/v4 v4.0.2
sigs.k8s.io/structured-merge-diff/v4/value
-# sigs.k8s.io/yaml v1.2.0
-sigs.k8s.io/yaml
diff --git a/vendor/sigs.k8s.io/yaml/.gitignore b/vendor/sigs.k8s.io/yaml/.gitignore
deleted file mode 100644
index e256a31e0..000000000
--- a/vendor/sigs.k8s.io/yaml/.gitignore
+++ /dev/null
@@ -1,20 +0,0 @@
-# OSX leaves these everywhere on SMB shares
-._*
-
-# Eclipse files
-.classpath
-.project
-.settings/**
-
-# Emacs save files
-*~
-
-# Vim-related files
-[._]*.s[a-w][a-z]
-[._]s[a-w][a-z]
-*.un~
-Session.vim
-.netrwhist
-
-# Go test binaries
-*.test
diff --git a/vendor/sigs.k8s.io/yaml/.travis.yml b/vendor/sigs.k8s.io/yaml/.travis.yml
deleted file mode 100644
index d20e23eff..000000000
--- a/vendor/sigs.k8s.io/yaml/.travis.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-language: go
-dist: xenial
-go:
- - 1.12.x
- - 1.13.x
-script:
- - diff -u <(echo -n) <(gofmt -d *.go)
- - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON)
- - GO111MODULE=on go vet .
- - GO111MODULE=on go test -v -race ./...
- - git diff --exit-code
-install:
- - GO111MODULE=off go get golang.org/x/lint/golint
diff --git a/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md b/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md
deleted file mode 100644
index de4711513..000000000
--- a/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Contributing Guidelines
-
-Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
-
-_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
-
-## Getting Started
-
-We have full documentation on how to get started contributing here:
-
-<!---
-If your repo has certain guidelines for contribution, put them here ahead of the general k8s resources
--->
-
-- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
-- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
-- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers
-
-## Mentorship
-
-- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
-
-<!---
-Custom Information - if you're copying this template for the first time you can add custom content here, for example:
-
-## Contact Information
-
-- [Slack channel](https://kubernetes.slack.com/messages/kubernetes-users) - Replace `kubernetes-users` with your slack channel string, this will send users directly to your channel.
-- [Mailing list](URL)
-
--->
diff --git a/vendor/sigs.k8s.io/yaml/LICENSE b/vendor/sigs.k8s.io/yaml/LICENSE
deleted file mode 100644
index 7805d36de..000000000
--- a/vendor/sigs.k8s.io/yaml/LICENSE
+++ /dev/null
@@ -1,50 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Sam Ghods
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-Copyright (c) 2012 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS
deleted file mode 100644
index 325b40b07..000000000
--- a/vendor/sigs.k8s.io/yaml/OWNERS
+++ /dev/null
@@ -1,27 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-- dims
-- lavalamp
-- smarterclayton
-- deads2k
-- sttts
-- liggitt
-- caesarxuchao
-reviewers:
-- dims
-- thockin
-- lavalamp
-- smarterclayton
-- wojtek-t
-- deads2k
-- derekwaynecarr
-- caesarxuchao
-- mikedanese
-- liggitt
-- gmarek
-- sttts
-- ncdc
-- tallclair
-labels:
-- sig/api-machinery
diff --git a/vendor/sigs.k8s.io/yaml/README.md b/vendor/sigs.k8s.io/yaml/README.md
deleted file mode 100644
index 5a651d916..000000000
--- a/vendor/sigs.k8s.io/yaml/README.md
+++ /dev/null
@@ -1,123 +0,0 @@
-# YAML marshaling and unmarshaling support for Go
-
-[![Build Status](https://travis-ci.org/kubernetes-sigs/yaml.svg)](https://travis-ci.org/kubernetes-sigs/yaml)
-
-kubernetes-sigs/yaml is a permanent fork of [ghodss/yaml](https://github.com/ghodss/yaml).
-
-## Introduction
-
-A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
-
-In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://web.archive.org/web/20190603050330/http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
-
-## Compatibility
-
-This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
-
-## Caveats
-
-**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
-
-```
-BAD:
- exampleKey: !!binary gIGC
-
-GOOD:
- exampleKey: gIGC
-... and decode the base64 data in your code.
-```
-
-**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are themselves maps will result in an error, since JSON does not support them. The same error occurs in `Unmarshal`, because you cannot unmarshal map keys anyway: struct fields cannot be keys.
-
-## Installation and usage
-
-To install, run:
-
-```
-$ go get sigs.k8s.io/yaml
-```
-
-And import using:
-
-```
-import "sigs.k8s.io/yaml"
-```
-
-Usage is very similar to the JSON library:
-
-```go
-package main
-
-import (
- "fmt"
-
- "sigs.k8s.io/yaml"
-)
-
-type Person struct {
- Name string `json:"name"` // Affects YAML field names too.
- Age int `json:"age"`
-}
-
-func main() {
- // Marshal a Person struct to YAML.
- p := Person{"John", 30}
- y, err := yaml.Marshal(p)
- if err != nil {
- fmt.Printf("err: %v\n", err)
- return
- }
- fmt.Println(string(y))
- /* Output:
- age: 30
- name: John
- */
-
- // Unmarshal the YAML back into a Person struct.
- var p2 Person
- err = yaml.Unmarshal(y, &p2)
- if err != nil {
- fmt.Printf("err: %v\n", err)
- return
- }
- fmt.Println(p2)
- /* Output:
- {John 30}
- */
-}
-```
-
-`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
-
-```go
-package main
-
-import (
- "fmt"
-
- "sigs.k8s.io/yaml"
-)
-
-func main() {
- j := []byte(`{"name": "John", "age": 30}`)
- y, err := yaml.JSONToYAML(j)
- if err != nil {
- fmt.Printf("err: %v\n", err)
- return
- }
- fmt.Println(string(y))
- /* Output:
- name: John
- age: 30
- */
- j2, err := yaml.YAMLToJSON(y)
- if err != nil {
- fmt.Printf("err: %v\n", err)
- return
- }
- fmt.Println(string(j2))
- /* Output:
- {"age":30,"name":"John"}
- */
-}
-```
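The removed README covers `Marshal`, `Unmarshal`, and the JSON/YAML converters, but not the strict variant declared in the deleted `yaml.go`. A minimal sketch of `UnmarshalStrict` rejecting an unknown key (the `Person` struct and input are illustrative, and the package is assumed to still be importable by the caller):

```go
package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

type Person struct {
	Name string `json:"name"`
	Age  int    `json:"age"`
}

func main() {
	// "nickname" does not map to any field of Person.
	data := []byte("name: John\nage: 30\nnickname: JD\n")

	var p Person
	// Plain Unmarshal silently drops the unknown key.
	if err := yaml.Unmarshal(data, &p); err != nil {
		fmt.Printf("Unmarshal err: %v\n", err)
	}

	// UnmarshalStrict appends DisallowUnknownFields, so the unknown key is an error.
	if err := yaml.UnmarshalStrict(data, &p); err != nil {
		fmt.Printf("UnmarshalStrict err: %v\n", err)
	}
}
```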
diff --git a/vendor/sigs.k8s.io/yaml/RELEASE.md b/vendor/sigs.k8s.io/yaml/RELEASE.md
deleted file mode 100644
index 6b642464e..000000000
--- a/vendor/sigs.k8s.io/yaml/RELEASE.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# Release Process
-
-The `yaml` Project is released on an as-needed basis. The process is as follows:
-
-1. An issue is filed proposing a new release, with a changelog since the last release
-1. All [OWNERS](OWNERS) must LGTM this release
-1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
-1. The release issue is closed
-1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released`
diff --git a/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS b/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS
deleted file mode 100644
index 0648a8ebf..000000000
--- a/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS
+++ /dev/null
@@ -1,17 +0,0 @@
-# Defined below are the security contacts for this repo.
-#
-# They are the contact point for the Product Security Team to reach out
-# to for triaging and handling of incoming issues.
-#
-# The below names agree to abide by the
-# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
-# and will be removed and replaced if they violate that agreement.
-#
-# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
-# INSTRUCTIONS AT https://kubernetes.io/security/
-
-cjcullen
-jessfraz
-liggitt
-philips
-tallclair
diff --git a/vendor/sigs.k8s.io/yaml/code-of-conduct.md b/vendor/sigs.k8s.io/yaml/code-of-conduct.md
deleted file mode 100644
index 0d15c00cf..000000000
--- a/vendor/sigs.k8s.io/yaml/code-of-conduct.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Kubernetes Community Code of Conduct
-
-Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
diff --git a/vendor/sigs.k8s.io/yaml/fields.go b/vendor/sigs.k8s.io/yaml/fields.go
deleted file mode 100644
index 235b7f2cf..000000000
--- a/vendor/sigs.k8s.io/yaml/fields.go
+++ /dev/null
@@ -1,502 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package yaml
-
-import (
- "bytes"
- "encoding"
- "encoding/json"
- "reflect"
- "sort"
- "strings"
- "sync"
- "unicode"
- "unicode/utf8"
-)
-
-// indirect walks down v allocating pointers as needed,
-// until it gets to a non-pointer.
-// if it encounters an Unmarshaler, indirect stops and returns that.
-// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
-func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
- // If v is a named type and is addressable,
- // start with its address, so that if the type has pointer methods,
- // we find them.
- if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
- v = v.Addr()
- }
- for {
- // Load value from interface, but only if the result will be
- // usefully addressable.
- if v.Kind() == reflect.Interface && !v.IsNil() {
- e := v.Elem()
- if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
- v = e
- continue
- }
- }
-
- if v.Kind() != reflect.Ptr {
- break
- }
-
- if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
- break
- }
- if v.IsNil() {
- if v.CanSet() {
- v.Set(reflect.New(v.Type().Elem()))
- } else {
- v = reflect.New(v.Type().Elem())
- }
- }
- if v.Type().NumMethod() > 0 {
- if u, ok := v.Interface().(json.Unmarshaler); ok {
- return u, nil, reflect.Value{}
- }
- if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
- return nil, u, reflect.Value{}
- }
- }
- v = v.Elem()
- }
- return nil, nil, v
-}
-
-// A field represents a single field found in a struct.
-type field struct {
- name string
- nameBytes []byte // []byte(name)
- equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
-
- tag bool
- index []int
- typ reflect.Type
- omitEmpty bool
- quoted bool
-}
-
-func fillField(f field) field {
- f.nameBytes = []byte(f.name)
- f.equalFold = foldFunc(f.nameBytes)
- return f
-}
-
-// byName sorts field by name, breaking ties with depth,
-// then breaking ties with "name came from json tag", then
-// breaking ties with index sequence.
-type byName []field
-
-func (x byName) Len() int { return len(x) }
-
-func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-func (x byName) Less(i, j int) bool {
- if x[i].name != x[j].name {
- return x[i].name < x[j].name
- }
- if len(x[i].index) != len(x[j].index) {
- return len(x[i].index) < len(x[j].index)
- }
- if x[i].tag != x[j].tag {
- return x[i].tag
- }
- return byIndex(x).Less(i, j)
-}
-
-// byIndex sorts field by index sequence.
-type byIndex []field
-
-func (x byIndex) Len() int { return len(x) }
-
-func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-func (x byIndex) Less(i, j int) bool {
- for k, xik := range x[i].index {
- if k >= len(x[j].index) {
- return false
- }
- if xik != x[j].index[k] {
- return xik < x[j].index[k]
- }
- }
- return len(x[i].index) < len(x[j].index)
-}
-
-// typeFields returns a list of fields that JSON should recognize for the given type.
-// The algorithm is breadth-first search over the set of structs to include - the top struct
-// and then any reachable anonymous structs.
-func typeFields(t reflect.Type) []field {
- // Anonymous fields to explore at the current level and the next.
- current := []field{}
- next := []field{{typ: t}}
-
- // Count of queued names for current level and the next.
- count := map[reflect.Type]int{}
- nextCount := map[reflect.Type]int{}
-
- // Types already visited at an earlier level.
- visited := map[reflect.Type]bool{}
-
- // Fields found.
- var fields []field
-
- for len(next) > 0 {
- current, next = next, current[:0]
- count, nextCount = nextCount, map[reflect.Type]int{}
-
- for _, f := range current {
- if visited[f.typ] {
- continue
- }
- visited[f.typ] = true
-
- // Scan f.typ for fields to include.
- for i := 0; i < f.typ.NumField(); i++ {
- sf := f.typ.Field(i)
- if sf.PkgPath != "" { // unexported
- continue
- }
- tag := sf.Tag.Get("json")
- if tag == "-" {
- continue
- }
- name, opts := parseTag(tag)
- if !isValidTag(name) {
- name = ""
- }
- index := make([]int, len(f.index)+1)
- copy(index, f.index)
- index[len(f.index)] = i
-
- ft := sf.Type
- if ft.Name() == "" && ft.Kind() == reflect.Ptr {
- // Follow pointer.
- ft = ft.Elem()
- }
-
- // Record found field and index sequence.
- if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
- tagged := name != ""
- if name == "" {
- name = sf.Name
- }
- fields = append(fields, fillField(field{
- name: name,
- tag: tagged,
- index: index,
- typ: ft,
- omitEmpty: opts.Contains("omitempty"),
- quoted: opts.Contains("string"),
- }))
- if count[f.typ] > 1 {
- // If there were multiple instances, add a second,
- // so that the annihilation code will see a duplicate.
- // It only cares about the distinction between 1 or 2,
- // so don't bother generating any more copies.
- fields = append(fields, fields[len(fields)-1])
- }
- continue
- }
-
- // Record new anonymous struct to explore in next round.
- nextCount[ft]++
- if nextCount[ft] == 1 {
- next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
- }
- }
- }
- }
-
- sort.Sort(byName(fields))
-
- // Delete all fields that are hidden by the Go rules for embedded fields,
- // except that fields with JSON tags are promoted.
-
- // The fields are sorted in primary order of name, secondary order
- // of field index length. Loop over names; for each name, delete
- // hidden fields by choosing the one dominant field that survives.
- out := fields[:0]
- for advance, i := 0, 0; i < len(fields); i += advance {
- // One iteration per name.
- // Find the sequence of fields with the name of this first field.
- fi := fields[i]
- name := fi.name
- for advance = 1; i+advance < len(fields); advance++ {
- fj := fields[i+advance]
- if fj.name != name {
- break
- }
- }
- if advance == 1 { // Only one field with this name
- out = append(out, fi)
- continue
- }
- dominant, ok := dominantField(fields[i : i+advance])
- if ok {
- out = append(out, dominant)
- }
- }
-
- fields = out
- sort.Sort(byIndex(fields))
-
- return fields
-}
-
-// dominantField looks through the fields, all of which are known to
-// have the same name, to find the single field that dominates the
-// others using Go's embedding rules, modified by the presence of
-// JSON tags. If there are multiple top-level fields, the boolean
-// will be false: This condition is an error in Go and we skip all
-// the fields.
-func dominantField(fields []field) (field, bool) {
- // The fields are sorted in increasing index-length order. The winner
- // must therefore be one with the shortest index length. Drop all
- // longer entries, which is easy: just truncate the slice.
- length := len(fields[0].index)
- tagged := -1 // Index of first tagged field.
- for i, f := range fields {
- if len(f.index) > length {
- fields = fields[:i]
- break
- }
- if f.tag {
- if tagged >= 0 {
- // Multiple tagged fields at the same level: conflict.
- // Return no field.
- return field{}, false
- }
- tagged = i
- }
- }
- if tagged >= 0 {
- return fields[tagged], true
- }
- // All remaining fields have the same length. If there's more than one,
- // we have a conflict (two fields named "X" at the same level) and we
- // return no field.
- if len(fields) > 1 {
- return field{}, false
- }
- return fields[0], true
-}
-
-var fieldCache struct {
- sync.RWMutex
- m map[reflect.Type][]field
-}
-
-// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
-func cachedTypeFields(t reflect.Type) []field {
- fieldCache.RLock()
- f := fieldCache.m[t]
- fieldCache.RUnlock()
- if f != nil {
- return f
- }
-
- // Compute fields without lock.
- // Might duplicate effort but won't hold other computations back.
- f = typeFields(t)
- if f == nil {
- f = []field{}
- }
-
- fieldCache.Lock()
- if fieldCache.m == nil {
- fieldCache.m = map[reflect.Type][]field{}
- }
- fieldCache.m[t] = f
- fieldCache.Unlock()
- return f
-}
-
-func isValidTag(s string) bool {
- if s == "" {
- return false
- }
- for _, c := range s {
- switch {
- case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
- // Backslash and quote chars are reserved, but
- // otherwise any punctuation chars are allowed
- // in a tag name.
- default:
- if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
- return false
- }
- }
- }
- return true
-}
-
-const (
- caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
- kelvin = '\u212a'
- smallLongEss = '\u017f'
-)
-
-// foldFunc returns one of four different case folding equivalence
-// functions, from most general (and slow) to fastest:
-//
-// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
-// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
-// 3) asciiEqualFold, no special, but includes non-letters (including _)
-// 4) simpleLetterEqualFold, no specials, no non-letters.
-//
-// The letters S and K are special because they map to 3 runes, not just 2:
-// * S maps to s and to U+017F 'ſ' Latin small letter long s
-// * k maps to K and to U+212A 'K' Kelvin sign
-// See http://play.golang.org/p/tTxjOc0OGo
-//
-// The returned function is specialized for matching against s and
-// should only be given s. It's not curried for performance reasons.
-func foldFunc(s []byte) func(s, t []byte) bool {
- nonLetter := false
- special := false // special letter
- for _, b := range s {
- if b >= utf8.RuneSelf {
- return bytes.EqualFold
- }
- upper := b & caseMask
- if upper < 'A' || upper > 'Z' {
- nonLetter = true
- } else if upper == 'K' || upper == 'S' {
- // See above for why these letters are special.
- special = true
- }
- }
- if special {
- return equalFoldRight
- }
- if nonLetter {
- return asciiEqualFold
- }
- return simpleLetterEqualFold
-}
-
-// equalFoldRight is a specialization of bytes.EqualFold when s is
-// known to be all ASCII (including punctuation), but contains an 's',
-// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
-// See comments on foldFunc.
-func equalFoldRight(s, t []byte) bool {
- for _, sb := range s {
- if len(t) == 0 {
- return false
- }
- tb := t[0]
- if tb < utf8.RuneSelf {
- if sb != tb {
- sbUpper := sb & caseMask
- if 'A' <= sbUpper && sbUpper <= 'Z' {
- if sbUpper != tb&caseMask {
- return false
- }
- } else {
- return false
- }
- }
- t = t[1:]
- continue
- }
- // sb is ASCII and t is not. t must be either kelvin
- // sign or long s; sb must be s, S, k, or K.
- tr, size := utf8.DecodeRune(t)
- switch sb {
- case 's', 'S':
- if tr != smallLongEss {
- return false
- }
- case 'k', 'K':
- if tr != kelvin {
- return false
- }
- default:
- return false
- }
- t = t[size:]
-
- }
- if len(t) > 0 {
- return false
- }
- return true
-}
-
-// asciiEqualFold is a specialization of bytes.EqualFold for use when
-// s is all ASCII (but may contain non-letters) and contains no
-// special-folding letters.
-// See comments on foldFunc.
-func asciiEqualFold(s, t []byte) bool {
- if len(s) != len(t) {
- return false
- }
- for i, sb := range s {
- tb := t[i]
- if sb == tb {
- continue
- }
- if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
- if sb&caseMask != tb&caseMask {
- return false
- }
- } else {
- return false
- }
- }
- return true
-}
-
-// simpleLetterEqualFold is a specialization of bytes.EqualFold for
-// use when s is all ASCII letters (no underscores, etc) and also
-// doesn't contain 'k', 'K', 's', or 'S'.
-// See comments on foldFunc.
-func simpleLetterEqualFold(s, t []byte) bool {
- if len(s) != len(t) {
- return false
- }
- for i, b := range s {
- if b&caseMask != t[i]&caseMask {
- return false
- }
- }
- return true
-}
-
-// tagOptions is the string following a comma in a struct field's "json"
-// tag, or the empty string. It does not include the leading comma.
-type tagOptions string
-
-// parseTag splits a struct field's json tag into its name and
-// comma-separated options.
-func parseTag(tag string) (string, tagOptions) {
- if idx := strings.Index(tag, ","); idx != -1 {
- return tag[:idx], tagOptions(tag[idx+1:])
- }
- return tag, tagOptions("")
-}
-
-// Contains reports whether a comma-separated list of options
-// contains a particular substr flag. substr must be surrounded by a
-// string boundary or commas.
-func (o tagOptions) Contains(optionName string) bool {
- if len(o) == 0 {
- return false
- }
- s := string(o)
- for s != "" {
- var next string
- i := strings.Index(s, ",")
- if i >= 0 {
- s, next = s[:i], s[i+1:]
- }
- if s == optionName {
- return true
- }
- s = next
- }
- return false
-}
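The fold functions above mirror `encoding/json`'s case-insensitive field matching, which also shows up when unmarshaling YAML through this package, since the final decode goes through the JSON decoder. A short sketch of that behaviour (the `Config` struct and keys are hypothetical):

```go
package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

type Config struct {
	Endpoint string `json:"endpoint"`
	Retries  int    `json:"retries"`
}

func main() {
	// The keys differ in case from the json tags; matching falls back to the
	// case-folding comparison rather than an exact byte-for-byte match.
	data := []byte("Endpoint: http://localhost:8080\nRETRIES: 3\n")

	var c Config
	if err := yaml.Unmarshal(data, &c); err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Printf("%+v\n", c) // {Endpoint:http://localhost:8080 Retries:3}
}
```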
diff --git a/vendor/sigs.k8s.io/yaml/go.mod b/vendor/sigs.k8s.io/yaml/go.mod
deleted file mode 100644
index 7224f3497..000000000
--- a/vendor/sigs.k8s.io/yaml/go.mod
+++ /dev/null
@@ -1,8 +0,0 @@
-module sigs.k8s.io/yaml
-
-go 1.12
-
-require (
- github.com/davecgh/go-spew v1.1.1
- gopkg.in/yaml.v2 v2.2.8
-)
diff --git a/vendor/sigs.k8s.io/yaml/go.sum b/vendor/sigs.k8s.io/yaml/go.sum
deleted file mode 100644
index 76e49483a..000000000
--- a/vendor/sigs.k8s.io/yaml/go.sum
+++ /dev/null
@@ -1,9 +0,0 @@
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
-gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/sigs.k8s.io/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/yaml.go
deleted file mode 100644
index efbc535d4..000000000
--- a/vendor/sigs.k8s.io/yaml/yaml.go
+++ /dev/null
@@ -1,380 +0,0 @@
-package yaml
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "reflect"
- "strconv"
-
- "gopkg.in/yaml.v2"
-)
-
-// Marshal marshals the object into JSON then converts JSON to YAML and returns the
-// YAML.
-func Marshal(o interface{}) ([]byte, error) {
- j, err := json.Marshal(o)
- if err != nil {
- return nil, fmt.Errorf("error marshaling into JSON: %v", err)
- }
-
- y, err := JSONToYAML(j)
- if err != nil {
- return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
- }
-
- return y, nil
-}
-
-// JSONOpt is a decoding option for decoding from JSON format.
-type JSONOpt func(*json.Decoder) *json.Decoder
-
-// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object,
-// optionally configuring the behavior of the JSON unmarshal.
-func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error {
- return yamlUnmarshal(y, o, false, opts...)
-}
-
-// UnmarshalStrict strictly converts YAML to JSON then uses JSON to unmarshal
-// into an object, optionally configuring the behavior of the JSON unmarshal.
-func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error {
- return yamlUnmarshal(y, o, true, append(opts, DisallowUnknownFields)...)
-}
-
-// yamlUnmarshal unmarshals the given YAML byte stream into the given interface,
-// optionally performing the unmarshalling strictly
-func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error {
- vo := reflect.ValueOf(o)
- unmarshalFn := yaml.Unmarshal
- if strict {
- unmarshalFn = yaml.UnmarshalStrict
- }
- j, err := yamlToJSON(y, &vo, unmarshalFn)
- if err != nil {
- return fmt.Errorf("error converting YAML to JSON: %v", err)
- }
-
- err = jsonUnmarshal(bytes.NewReader(j), o, opts...)
- if err != nil {
- return fmt.Errorf("error unmarshaling JSON: %v", err)
- }
-
- return nil
-}
-
-// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the
-// object, optionally applying decoder options prior to decoding. We are not
-// using json.Unmarshal directly as we want the chance to pass in non-default
-// options.
-func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error {
- d := json.NewDecoder(r)
- for _, opt := range opts {
- d = opt(d)
- }
- if err := d.Decode(&o); err != nil {
- return fmt.Errorf("while decoding JSON: %v", err)
- }
- return nil
-}
-
-// JSONToYAML Converts JSON to YAML.
-func JSONToYAML(j []byte) ([]byte, error) {
- // Convert the JSON to an object.
- var jsonObj interface{}
- // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
- // Go JSON library doesn't try to pick the right number type (int, float,
- // etc.) when unmarshalling to interface{}, it just picks float64
- // universally. go-yaml does go through the effort of picking the right
- // number type, so we can preserve number type throughout this process.
- err := yaml.Unmarshal(j, &jsonObj)
- if err != nil {
- return nil, err
- }
-
- // Marshal this object into YAML.
- return yaml.Marshal(jsonObj)
-}
-
-// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML,
-// passing JSON through this method should be a no-op.
-//
-// Things YAML can do that are not supported by JSON:
-// * In YAML you can have binary and null keys in your maps. These are invalid
-// in JSON. (int and float keys are converted to strings.)
-// * Binary data in YAML with the !!binary tag is not supported. If you want to
-// use binary data with this library, encode the data as base64 as usual but do
-// not use the !!binary tag in your YAML. This will ensure the original base64
-// encoded data makes it all the way through to the JSON.
-//
-// For strict decoding of YAML, use YAMLToJSONStrict.
-func YAMLToJSON(y []byte) ([]byte, error) {
- return yamlToJSON(y, nil, yaml.Unmarshal)
-}
-
-// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding,
-// returning an error on any duplicate field names.
-func YAMLToJSONStrict(y []byte) ([]byte, error) {
- return yamlToJSON(y, nil, yaml.UnmarshalStrict)
-}
-
-func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) {
- // Convert the YAML to an object.
- var yamlObj interface{}
- err := yamlUnmarshal(y, &yamlObj)
- if err != nil {
- return nil, err
- }
-
- // YAML objects are not completely compatible with JSON objects (e.g. you
- // can have non-string keys in YAML). So, convert the YAML-compatible object
- // to a JSON-compatible object, failing with an error if irrecoverable
-	// incompatibilities happen along the way.
- jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
- if err != nil {
- return nil, err
- }
-
- // Convert this object to JSON and return the data.
- return json.Marshal(jsonObj)
-}
-
-func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
- var err error
-
- // Resolve jsonTarget to a concrete value (i.e. not a pointer or an
- // interface). We pass decodingNull as false because we're not actually
- // decoding into the value, we're just checking if the ultimate target is a
- // string.
- if jsonTarget != nil {
- ju, tu, pv := indirect(*jsonTarget, false)
-		// We have a JSON or Text Unmarshaler at this level, so we can't be trying
- // to decode into a string.
- if ju != nil || tu != nil {
- jsonTarget = nil
- } else {
- jsonTarget = &pv
- }
- }
-
- // If yamlObj is a number or a boolean, check if jsonTarget is a string -
- // if so, coerce. Else return normal.
- // If yamlObj is a map or array, find the field that each key is
- // unmarshaling to, and when you recurse pass the reflect.Value for that
- // field back into this function.
- switch typedYAMLObj := yamlObj.(type) {
- case map[interface{}]interface{}:
- // JSON does not support arbitrary keys in a map, so we must convert
- // these keys to strings.
- //
- // From my reading of go-yaml v2 (specifically the resolve function),
- // keys can only have the types string, int, int64, float64, binary
- // (unsupported), or null (unsupported).
- strMap := make(map[string]interface{})
- for k, v := range typedYAMLObj {
- // Resolve the key to a string first.
- var keyString string
- switch typedKey := k.(type) {
- case string:
- keyString = typedKey
- case int:
- keyString = strconv.Itoa(typedKey)
- case int64:
- // go-yaml will only return an int64 as a key if the system
- // architecture is 32-bit and the key's value is between 32-bit
- // and 64-bit. Otherwise the key type will simply be int.
- keyString = strconv.FormatInt(typedKey, 10)
- case float64:
- // Stolen from go-yaml to use the same conversion to string as
- // the go-yaml library uses to convert float to string when
- // Marshaling.
- s := strconv.FormatFloat(typedKey, 'g', -1, 32)
- switch s {
- case "+Inf":
- s = ".inf"
- case "-Inf":
- s = "-.inf"
- case "NaN":
- s = ".nan"
- }
- keyString = s
- case bool:
- if typedKey {
- keyString = "true"
- } else {
- keyString = "false"
- }
- default:
- return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
- reflect.TypeOf(k), k, v)
- }
-
- // jsonTarget should be a struct or a map. If it's a struct, find
- // the field it's going to map to and pass its reflect.Value. If
- // it's a map, find the element type of the map and pass the
- // reflect.Value created from that type. If it's neither, just pass
- // nil - JSON conversion will error for us if it's a real issue.
- if jsonTarget != nil {
- t := *jsonTarget
- if t.Kind() == reflect.Struct {
- keyBytes := []byte(keyString)
- // Find the field that the JSON library would use.
- var f *field
- fields := cachedTypeFields(t.Type())
- for i := range fields {
- ff := &fields[i]
- if bytes.Equal(ff.nameBytes, keyBytes) {
- f = ff
- break
- }
- // Do case-insensitive comparison.
- if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
- f = ff
- }
- }
- if f != nil {
- // Find the reflect.Value of the most preferential
- // struct field.
- jtf := t.Field(f.index[0])
- strMap[keyString], err = convertToJSONableObject(v, &jtf)
- if err != nil {
- return nil, err
- }
- continue
- }
- } else if t.Kind() == reflect.Map {
- // Create a zero value of the map's element type to use as
- // the JSON target.
- jtv := reflect.Zero(t.Type().Elem())
- strMap[keyString], err = convertToJSONableObject(v, &jtv)
- if err != nil {
- return nil, err
- }
- continue
- }
- }
- strMap[keyString], err = convertToJSONableObject(v, nil)
- if err != nil {
- return nil, err
- }
- }
- return strMap, nil
- case []interface{}:
- // We need to recurse into arrays in case there are any
- // map[interface{}]interface{}'s inside and to convert any
- // numbers to strings.
-
- // If jsonTarget is a slice (which it really should be), find the
- // thing it's going to map to. If it's not a slice, just pass nil
- // - JSON conversion will error for us if it's a real issue.
- var jsonSliceElemValue *reflect.Value
- if jsonTarget != nil {
- t := *jsonTarget
- if t.Kind() == reflect.Slice {
- // By default slices point to nil, but we need a reflect.Value
- // pointing to a value of the slice type, so we create one here.
- ev := reflect.Indirect(reflect.New(t.Type().Elem()))
- jsonSliceElemValue = &ev
- }
- }
-
- // Make and use a new array.
- arr := make([]interface{}, len(typedYAMLObj))
- for i, v := range typedYAMLObj {
- arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
- if err != nil {
- return nil, err
- }
- }
- return arr, nil
- default:
- // If the target type is a string and the YAML type is a number,
- // convert the YAML type to a string.
- if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
- // Based on my reading of go-yaml, it may return int, int64,
- // float64, or uint64.
- var s string
- switch typedVal := typedYAMLObj.(type) {
- case int:
- s = strconv.FormatInt(int64(typedVal), 10)
- case int64:
- s = strconv.FormatInt(typedVal, 10)
- case float64:
- s = strconv.FormatFloat(typedVal, 'g', -1, 32)
- case uint64:
- s = strconv.FormatUint(typedVal, 10)
- case bool:
- if typedVal {
- s = "true"
- } else {
- s = "false"
- }
- }
- if len(s) > 0 {
- yamlObj = interface{}(s)
- }
- }
- return yamlObj, nil
- }
-}
-
-// JSONObjectToYAMLObject converts an in-memory JSON object into a YAML in-memory MapSlice,
-// without going through a byte representation. A nil or empty map[string]interface{} input is
-// converted to an empty map, i.e. yaml.MapSlice(nil).
-//
-// interface{} slices stay interface{} slices. map[string]interface{} becomes yaml.MapSlice.
-//
-// int64 and float64 are down casted following the logic of github.com/go-yaml/yaml:
-// - float64s are down-casted as far as possible without data-loss to int, int64, uint64.
-// - int64s are down-casted to int if possible without data-loss.
-//
-// Big int/int64/uint64 do not lose precision as in the json-yaml roundtripping case.
-//
-// string, bool and any other types are unchanged.
-func JSONObjectToYAMLObject(j map[string]interface{}) yaml.MapSlice {
- if len(j) == 0 {
- return nil
- }
- ret := make(yaml.MapSlice, 0, len(j))
- for k, v := range j {
- ret = append(ret, yaml.MapItem{Key: k, Value: jsonToYAMLValue(v)})
- }
- return ret
-}
-
-func jsonToYAMLValue(j interface{}) interface{} {
- switch j := j.(type) {
- case map[string]interface{}:
- if j == nil {
- return interface{}(nil)
- }
- return JSONObjectToYAMLObject(j)
- case []interface{}:
- if j == nil {
- return interface{}(nil)
- }
- ret := make([]interface{}, len(j))
- for i := range j {
- ret[i] = jsonToYAMLValue(j[i])
- }
- return ret
- case float64:
- // replicate the logic in https://github.com/go-yaml/yaml/blob/51d6538a90f86fe93ac480b35f37b2be17fef232/resolve.go#L151
- if i64 := int64(j); j == float64(i64) {
- if i := int(i64); i64 == int64(i) {
- return i
- }
- return i64
- }
- if ui64 := uint64(j); j == float64(ui64) {
- return ui64
- }
- return j
- case int64:
- if i := int(j); j == int64(i) {
- return i
- }
- return j
- }
- return j
-}
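`JSONObjectToYAMLObject` above converts an already-decoded JSON object into a `yaml.MapSlice` without a byte-level round trip. A brief sketch of calling it (gopkg.in/yaml.v2 is assumed only for printing the result, and the input map is made up):

```go
package main

import (
	"fmt"

	yamlv2 "gopkg.in/yaml.v2"
	"sigs.k8s.io/yaml"
)

func main() {
	// Values are typed the way encoding/json would decode them: numbers as float64.
	obj := map[string]interface{}{
		"name":     "John",
		"age":      float64(30), // down-casted to int by jsonToYAMLValue
		"verified": true,
	}

	ms := yaml.JSONObjectToYAMLObject(obj)

	// The result is a gopkg.in/yaml.v2 MapSlice, so marshal it with that package.
	out, err := yamlv2.Marshal(ms)
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Print(string(out))
}
```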
diff --git a/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/sigs.k8s.io/yaml/yaml_go110.go
deleted file mode 100644
index ab3e06a22..000000000
--- a/vendor/sigs.k8s.io/yaml/yaml_go110.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// This file contains changes that are only compatible with go 1.10 and onwards.
-
-// +build go1.10
-
-package yaml
-
-import "encoding/json"
-
-// DisallowUnknownFields configures the JSON decoder to error out if unknown
-// fields come along, instead of dropping them by default.
-func DisallowUnknownFields(d *json.Decoder) *json.Decoder {
- d.DisallowUnknownFields()
- return d
-}
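Since `DisallowUnknownFields` is itself a `JSONOpt`, it can also be passed straight to the non-strict `Unmarshal` when only the JSON-side strictness is wanted. A minimal sketch (struct and input are illustrative):

```go
package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

type Person struct {
	Name string `json:"name"`
}

func main() {
	data := []byte("name: John\nextra: field\n")

	var p Person
	// YAML parsing stays non-strict (duplicate keys allowed), but the JSON
	// decoder now rejects keys that do not map to a field of Person.
	err := yaml.Unmarshal(data, &p, yaml.DisallowUnknownFields)
	fmt.Printf("err: %v\n", err)
}
```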