Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/containers/buildah/CHANGELOG.md | 12
-rw-r--r--  vendor/github.com/containers/buildah/add.go | 12
-rw-r--r--  vendor/github.com/containers/buildah/buildah.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/changelog.txt | 13
-rw-r--r--  vendor/github.com/containers/buildah/common.go | 3
-rw-r--r--  vendor/github.com/containers/buildah/go.mod | 6
-rw-r--r--  vendor/github.com/containers/buildah/go.sum | 24
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/stage_executor.go | 5
-rw-r--r--  vendor/github.com/containers/common/pkg/chown/chown.go | 122
-rw-r--r--  vendor/github.com/containers/common/pkg/config/containers.conf | 6
-rw-r--r--  vendor/github.com/containers/common/pkg/config/default.go | 2
-rw-r--r--  vendor/github.com/containers/common/pkg/report/template.go | 16
-rw-r--r--  vendor/github.com/containers/common/version/version.go | 2
-rw-r--r--  vendor/github.com/docker/spdystream/CONTRIBUTING.md | 13
-rw-r--r--  vendor/github.com/docker/spdystream/LICENSE.docs | 425
-rw-r--r--  vendor/github.com/docker/spdystream/MAINTAINERS | 28
-rw-r--r--  vendor/github.com/docker/spdystream/README.md | 77
-rw-r--r--  vendor/github.com/docker/spdystream/connection.go | 958
-rw-r--r--  vendor/github.com/docker/spdystream/handlers.go | 38
-rw-r--r--  vendor/github.com/docker/spdystream/priority.go | 98
-rw-r--r--  vendor/github.com/docker/spdystream/spdy/dictionary.go | 187
-rw-r--r--  vendor/github.com/docker/spdystream/spdy/read.go | 348
-rw-r--r--  vendor/github.com/docker/spdystream/spdy/types.go | 275
-rw-r--r--  vendor/github.com/docker/spdystream/spdy/write.go | 318
-rw-r--r--  vendor/github.com/docker/spdystream/stream.go | 327
-rw-r--r--  vendor/github.com/docker/spdystream/utils.go | 16
-rw-r--r--  vendor/github.com/magefile/mage/LICENSE (renamed from vendor/github.com/docker/spdystream/LICENSE) | 14
-rw-r--r--  vendor/github.com/magefile/mage/mg/color.go | 80
-rw-r--r--  vendor/github.com/magefile/mage/mg/color_string.go | 38
-rw-r--r--  vendor/github.com/magefile/mage/mg/deps.go | 352
-rw-r--r--  vendor/github.com/magefile/mage/mg/errors.go | 51
-rw-r--r--  vendor/github.com/magefile/mage/mg/runtime.go | 136
-rw-r--r--  vendor/github.com/magefile/mage/sh/cmd.go | 177
-rw-r--r--  vendor/github.com/magefile/mage/sh/helpers.go | 40
-rw-r--r--  vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go | 3
-rw-r--r--  vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go | 4
-rw-r--r--  vendor/github.com/rootless-containers/rootlesskit/pkg/port/portutil/portutil.go | 144
-rw-r--r--  vendor/github.com/sirupsen/logrus/.travis.yml | 13
-rw-r--r--  vendor/github.com/sirupsen/logrus/CHANGELOG.md | 27
-rw-r--r--  vendor/github.com/sirupsen/logrus/README.md | 2
-rw-r--r--  vendor/github.com/sirupsen/logrus/entry.go | 65
-rw-r--r--  vendor/github.com/sirupsen/logrus/go.mod | 1
-rw-r--r--  vendor/github.com/sirupsen/logrus/go.sum | 2
-rw-r--r--  vendor/github.com/sirupsen/logrus/json_formatter.go | 2
-rw-r--r--  vendor/github.com/sirupsen/logrus/logger.go | 2
-rw-r--r--  vendor/github.com/sirupsen/logrus/magefile.go | 77
-rw-r--r--  vendor/github.com/sirupsen/logrus/terminal_check_unix.go | 2
-rw-r--r--  vendor/github.com/sirupsen/logrus/text_formatter.go | 2
-rw-r--r--  vendor/github.com/spf13/cobra/.golangci.yml | 48
-rw-r--r--  vendor/github.com/spf13/cobra/.travis.yml | 9
-rw-r--r--  vendor/github.com/spf13/cobra/CHANGELOG.md | 35
-rw-r--r--  vendor/github.com/spf13/cobra/CONDUCT.md | 37
-rw-r--r--  vendor/github.com/spf13/cobra/Makefile | 18
-rw-r--r--  vendor/github.com/spf13/cobra/README.md | 32
-rw-r--r--  vendor/github.com/spf13/cobra/bash_completions.go | 133
-rw-r--r--  vendor/github.com/spf13/cobra/bash_completions.md | 2
-rw-r--r--  vendor/github.com/spf13/cobra/cobra.go | 15
-rw-r--r--  vendor/github.com/spf13/cobra/command.go | 116
-rw-r--r--  vendor/github.com/spf13/cobra/custom_completions.go | 4
-rw-r--r--  vendor/github.com/spf13/cobra/fish_completions.go | 6
-rw-r--r--  vendor/github.com/spf13/cobra/go.mod | 2
-rw-r--r--  vendor/github.com/spf13/cobra/go.sum | 4
-rw-r--r--  vendor/github.com/spf13/cobra/powershell_completions.go | 323
-rw-r--r--  vendor/github.com/spf13/cobra/powershell_completions.md | 15
-rw-r--r--  vendor/github.com/spf13/cobra/projects_using_cobra.md | 3
-rw-r--r--  vendor/github.com/spf13/cobra/shell_completions.md | 119
-rw-r--r--  vendor/github.com/spf13/cobra/zsh_completions.go | 4
67 files changed, 1981 insertions, 3511 deletions
diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md
index ccf46b324..13e7ad24b 100644
--- a/vendor/github.com/containers/buildah/CHANGELOG.md
+++ b/vendor/github.com/containers/buildah/CHANGELOG.md
@@ -2,6 +2,18 @@
# Changelog
+## v1.19.6 (2021-02-18)
+ Bump c/containers/storage v1.24.6
+ Don't fail copy to emptydir
+ Workaround for RHEL gating test failure
+ Fix config-flags-verification test on F33
+ Fix bud capabilities test
+ Stop overriding the location of the blob info cache
+ Fix caching layers with build args
+
+## v1.19.5 (2021-02-09)
+ Vendor in latest containers/image and common
+
## v1.19.4 (2021-02-06)
run: fix check for host pid namespace
bump containernetworking/cni library to v0.8.1 - fix for CVE-2021-20206
diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go
index dd69d45cf..0903fc7db 100644
--- a/vendor/github.com/containers/buildah/add.go
+++ b/vendor/github.com/containers/buildah/add.go
@@ -303,6 +303,13 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
renameTarget = filepath.Base(extractDirectory)
extractDirectory = filepath.Dir(extractDirectory)
}
+
+ // if the destination is a directory that doesn't yet exist, let's copy it.
+ newDestDirFound := false
+ if (len(destStats) == 1 || len(destStats[0].Globbed) == 0) && destMustBeDirectory && !destCanBeFile {
+ newDestDirFound = true
+ }
+
if len(destStats) == 1 && len(destStats[0].Globbed) == 1 && destStats[0].Results[destStats[0].Globbed[0]].IsRegular {
if destMustBeDirectory {
return errors.Errorf("destination %v already exists but is not a directory", destination)
@@ -415,6 +422,11 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
if !globInfo.IsDir || !includeDirectoryAnyway(rel, pm) {
continue
}
+ } else {
+ // if the destination is a directory that doesn't yet exist, and is not excluded, let's copy it.
+ if newDestDirFound {
+ itemsCopied++
+ }
}
} else {
// Make sure we don't trigger a "copied nothing" error for an empty context
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 7065e00e4..dd43ea99a 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -28,7 +28,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.19.4"
+ Version = "1.19.6"
// The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to
diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt
index 4a0f81b04..25da04663 100644
--- a/vendor/github.com/containers/buildah/changelog.txt
+++ b/vendor/github.com/containers/buildah/changelog.txt
@@ -1,3 +1,16 @@
+
+- Changelog for v1.19.6 (2021-02-18)
+ * Bump c/containers/storage v1.24.6
+ * Don't fail copy to emptydir
+ * Workaround for RHEL gating test failure
+ * Fix config-flags-verification test on F33
+ * Fix bud capabilities test
+ * Stop overriding the location of the blob info cache
+ * Fix caching layers with build args
+
+- Changelog for v1.19.5 (2021-02-09)
+ * Vendor in latest containers/image and common
+
- Changelog for v1.19.4 (2021-02-06)
* run: fix check for host pid namespace
* bump containernetworking/cni library to v0.8.1 - fix for CVE-2021-20206
diff --git a/vendor/github.com/containers/buildah/common.go b/vendor/github.com/containers/buildah/common.go
index 594362300..5219c6b78 100644
--- a/vendor/github.com/containers/buildah/common.go
+++ b/vendor/github.com/containers/buildah/common.go
@@ -56,9 +56,6 @@ func getSystemContext(store storage.Store, defaults *types.SystemContext, signat
sc.SignaturePolicyPath = signaturePolicyPath
}
if store != nil {
- if sc.BlobInfoCacheDir == "" {
- sc.BlobInfoCacheDir = filepath.Join(store.GraphRoot(), "cache")
- }
if sc.SystemRegistriesConfPath == "" && unshare.IsRootless() {
userRegistriesFile := filepath.Join(store.GraphRoot(), "registries.conf")
if _, err := os.Stat(userRegistriesFile); err == nil {
diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod
index 17469ad12..ee3c28d50 100644
--- a/vendor/github.com/containers/buildah/go.mod
+++ b/vendor/github.com/containers/buildah/go.mod
@@ -5,10 +5,10 @@ go 1.12
require (
github.com/containerd/containerd v1.4.1 // indirect
github.com/containernetworking/cni v0.8.1
- github.com/containers/common v0.33.1
- github.com/containers/image/v5 v5.10.1
+ github.com/containers/common v0.33.4
+ github.com/containers/image/v5 v5.10.2
github.com/containers/ocicrypt v1.0.3
- github.com/containers/storage v1.24.5
+ github.com/containers/storage v1.24.6
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/go-units v0.4.0
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316
diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum
index cab904fcf..9b6e62ff4 100644
--- a/vendor/github.com/containers/buildah/go.sum
+++ b/vendor/github.com/containers/buildah/go.sum
@@ -78,19 +78,19 @@ github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDG
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containernetworking/cni v0.8.1 h1:7zpDnQ3T3s4ucOuJ/ZCLrYBxzkg0AELFfII3Epo9TmI=
github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containers/common v0.33.1 h1:XpDiq8Cta8+u1s4kpYSEWdB140ZmqgyIXfWkLqKx3z0=
-github.com/containers/common v0.33.1/go.mod h1:mjDo/NKeweL/onaspLhZ38WnHXaYmrELHclIdvSnYpY=
-github.com/containers/image/v5 v5.9.0 h1:dRmUtcluQcmasNo3DpnRoZjfU0rOu1qZeL6wlDJr10Q=
-github.com/containers/image/v5 v5.9.0/go.mod h1:blOEFd/iFdeyh891ByhCVUc+xAcaI3gBegXECwz9UbQ=
-github.com/containers/image/v5 v5.10.1 h1:tHhGQ8RCMxJfJLD/PEW1qrOKX8nndledW9qz6UiAxns=
+github.com/containers/common v0.33.4 h1:f1jowItfo6xw0bZGZq8oE5dw1pBIkldqB0FqW+HHzG8=
+github.com/containers/common v0.33.4/go.mod h1:PhgL71XuC4jJ/1BIqeP7doke3aMFkCP90YBXwDeUr9g=
github.com/containers/image/v5 v5.10.1/go.mod h1:JlRLJZv7elVbtHaaaR6Kz8i6G3k2ttj4t7fubwxD9Hs=
+github.com/containers/image/v5 v5.10.2 h1:STD9GYR9p/X0qTLmBYsyx8dEM7zQW+qZ8KHoL/64fkg=
+github.com/containers/image/v5 v5.10.2/go.mod h1:JlRLJZv7elVbtHaaaR6Kz8i6G3k2ttj4t7fubwxD9Hs=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.3 h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6GzVe1c=
github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g=
-github.com/containers/storage v1.23.7/go.mod h1:cUT2zHjtx+WlVri30obWmM2gpqpi8jfPsmIzP1TVpEI=
github.com/containers/storage v1.24.5 h1:BusfdU0rCS2/Daa/DPw+0iLfGRlYA7UVF7D0el3N7Vk=
github.com/containers/storage v1.24.5/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ=
+github.com/containers/storage v1.24.6 h1:9PBb9PoGuj5B/3MGfxx//RmUjMAklmx3rBbuCkuIc94=
+github.com/containers/storage v1.24.6/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -235,9 +235,6 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.11.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.3 h1:dB4Bn0tN3wdCzQxnS8r06kV74qN/TAfaIS0bVE8h3jc=
-github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.5 h1:xNCE0uE6yvTPRS+0wGNMHPo3NIpwnk6aluQZ6R6kRcc=
github.com/klauspost/compress v1.11.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg=
@@ -333,8 +330,6 @@ github.com/opencontainers/runtime-spec v1.0.3-0.20200710190001-3e4195d92445/go.m
github.com/opencontainers/runtime-tools v0.9.0 h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=
github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
-github.com/opencontainers/selinux v1.6.0 h1:+bIAS/Za3q5FTwWym4fTB0vObnfCf3G/NC7K6Jx62mY=
-github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.8.0 h1:+77ba4ar4jsCbL1GLbFL8fFM57w6suPfSS9PDLDY7KM=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/openshift/imagebuilder v1.1.8 h1:gjiIl8pbNj0eC4XWvFJHATdDvYm64p9/pLDLQWoLZPA=
@@ -416,8 +411,6 @@ github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG
github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ=
-github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.9 h1:RsKRIA2MO8x56wkkcd3LbtcE/uMszhb6DpRf+3uwa3I=
github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
@@ -425,16 +418,12 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
-github.com/vbauerster/mpb/v5 v5.3.0 h1:vgrEJjUzHaSZKDRRxul5Oh4C72Yy/5VEMb0em+9M0mQ=
-github.com/vbauerster/mpb/v5 v5.3.0/go.mod h1:4yTkvAb8Cm4eylAp6t0JRq6pXDkFJ4krUlDqWYkakAs=
github.com/vbauerster/mpb/v5 v5.4.0 h1:n8JPunifvQvh6P1D1HAl2Ur9YcmKT1tpoUuiea5mlmg=
github.com/vbauerster/mpb/v5 v5.4.0/go.mod h1:fi4wVo7BVQ22QcvFObm+VwliQXlV1eBT8JDaKXR4JGI=
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
-github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243 h1:R43TdZy32XXSXjJn7M/HhALJ9imq6ztLnChfYJpVDnM=
-github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -550,7 +539,6 @@ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
index 13631108e..30c5519f3 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
@@ -834,11 +834,12 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// Check if there's already an image based on our parent that
// has the same change that we're about to make, so far as we
// can tell.
- // Only do this if there were no build args given by the user,
+ // Only do this if the step we are on is not an ARG step,
// we need to call ib.Run() to correctly put the args together before
// determining if a cached layer with the same build args already exists
// and that is done in the if block below.
- if checkForLayers && len(s.builder.Args) == 0 {
+ if checkForLayers && step.Command != "arg" {
+
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
if err != nil {
return "", nil, errors.Wrap(err, "error checking if cached image exists from a previous build")
diff --git a/vendor/github.com/containers/common/pkg/chown/chown.go b/vendor/github.com/containers/common/pkg/chown/chown.go
new file mode 100644
index 000000000..fe794304e
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/chown/chown.go
@@ -0,0 +1,122 @@
+package chown
+
+import (
+ "os"
+ "os/user"
+ "path/filepath"
+ "syscall"
+
+ "github.com/containers/storage/pkg/homedir"
+ "github.com/pkg/errors"
+)
+
+// DangerousHostPath validates if a host path is dangerous and should not be modified
+func DangerousHostPath(path string) (bool, error) {
+ excludePaths := map[string]bool{
+ "/": true,
+ "/bin": true,
+ "/boot": true,
+ "/dev": true,
+ "/etc": true,
+ "/etc/passwd": true,
+ "/etc/pki": true,
+ "/etc/shadow": true,
+ "/home": true,
+ "/lib": true,
+ "/lib64": true,
+ "/media": true,
+ "/opt": true,
+ "/proc": true,
+ "/root": true,
+ "/run": true,
+ "/sbin": true,
+ "/srv": true,
+ "/sys": true,
+ "/tmp": true,
+ "/usr": true,
+ "/var": true,
+ "/var/lib": true,
+ "/var/log": true,
+ }
+
+ if home := homedir.Get(); home != "" {
+ excludePaths[home] = true
+ }
+
+ if sudoUser := os.Getenv("SUDO_USER"); sudoUser != "" {
+ if usr, err := user.Lookup(sudoUser); err == nil {
+ excludePaths[usr.HomeDir] = true
+ }
+ }
+
+ absPath, err := filepath.Abs(path)
+ if err != nil {
+ return true, err
+ }
+
+ realPath, err := filepath.EvalSymlinks(absPath)
+ if err != nil {
+ return true, err
+ }
+
+ if excludePaths[realPath] {
+ return true, nil
+ }
+
+ return false, nil
+}
+
+// ChangeHostPathOwnership changes the uid and gid ownership of a directory or file within the host.
+// This is used by the volume U flag to change source volumes ownership
+func ChangeHostPathOwnership(path string, recursive bool, uid, gid int) error {
+ // Validate if host path can be chowned
+ isDangerous, err := DangerousHostPath(path)
+ if err != nil {
+ return errors.Wrapf(err, "failed to validate if host path is dangerous")
+ }
+
+ if isDangerous {
+ return errors.Errorf("chowning host path %q is not allowed. You can manually `chown -R %d:%d %s`", path, uid, gid, path)
+ }
+
+ // Chown host path
+ if recursive {
+ err := filepath.Walk(path, func(filePath string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Get current ownership
+ currentUID := int(f.Sys().(*syscall.Stat_t).Uid)
+ currentGID := int(f.Sys().(*syscall.Stat_t).Gid)
+
+ if uid != currentUID || gid != currentGID {
+ return os.Lchown(filePath, uid, gid)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ return errors.Wrapf(err, "failed to chown recursively host path")
+ }
+ } else {
+ // Get host path info
+ f, err := os.Lstat(path)
+ if err != nil {
+ return errors.Wrapf(err, "failed to get host path information")
+ }
+
+ // Get current ownership
+ currentUID := int(f.Sys().(*syscall.Stat_t).Uid)
+ currentGID := int(f.Sys().(*syscall.Stat_t).Gid)
+
+ if uid != currentUID || gid != currentGID {
+ if err := os.Lchown(path, uid, gid); err != nil {
+ return errors.Wrapf(err, "failed to chown host path")
+ }
+ }
+ }
+
+ return nil
+}
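
The new `chown` package above exposes `DangerousHostPath` and `ChangeHostPathOwnership` for the volume `U` flag. As a minimal, hypothetical caller sketch (the path and UID/GID values are invented for illustration; only the two exported functions come from this vendor drop):

```go
// Hypothetical caller of the vendored chown helpers; the path and IDs are
// examples only and not part of this change.
package main

import (
	"fmt"
	"os"

	"github.com/containers/common/pkg/chown"
)

func main() {
	// ChangeHostPathOwnership calls DangerousHostPath internally and refuses
	// to touch system locations such as /etc, /usr, or the user's home.
	if err := chown.ChangeHostPathOwnership("/srv/data", true, 1000, 1000); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```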
diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf
index 18243f296..0114f2975 100644
--- a/vendor/github.com/containers/common/pkg/config/containers.conf
+++ b/vendor/github.com/containers/common/pkg/config/containers.conf
@@ -73,7 +73,6 @@ default_capabilities = [
"SYS_CHROOT"
]
-
# A list of sysctls to be set in containers by default,
# specified as "name=value",
# for example:"net.ipv4.ping_group_range = 0 0".
@@ -241,6 +240,9 @@ default_sysctls = [
#
# cni_plugin_dirs = ["/usr/libexec/cni"]
+# The network name of the default CNI network to attach pods to.
+# default_network = "podman"
+
# Path to the directory where CNI configuration files are located.
#
# network_config_dir = "/etc/cni/net.d/"
@@ -324,7 +326,7 @@ default_sysctls = [
# associated with the pod. This container does nothing other then sleep,
# reserving the pods resources for the lifetime of the pod.
#
-# infra_image = "k8s.gcr.io/pause:3.2"
+# infra_image = "k8s.gcr.io/pause:3.4.1"
# Specify the locking mechanism to use; valid values are "shm" and "file".
# Change the default only if you are sure of what you are doing, in general
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go
index 918ce93e5..57f64c395 100644
--- a/vendor/github.com/containers/common/pkg/config/default.go
+++ b/vendor/github.com/containers/common/pkg/config/default.go
@@ -45,7 +45,7 @@ var (
// DefaultInitPath is the default path to the container-init binary
DefaultInitPath = "/usr/libexec/podman/catatonit"
// DefaultInfraImage to use for infra container
- DefaultInfraImage = "k8s.gcr.io/pause:3.2"
+ DefaultInfraImage = "k8s.gcr.io/pause:3.4.1"
// DefaultRootlessSHMLockPath is the default path for rootless SHM locks
DefaultRootlessSHMLockPath = "/libpod_rootless_lock"
// DefaultDetachKeys is the default keys sequence for detaching a
diff --git a/vendor/github.com/containers/common/pkg/report/template.go b/vendor/github.com/containers/common/pkg/report/template.go
index 559c1625b..f7b4506bb 100644
--- a/vendor/github.com/containers/common/pkg/report/template.go
+++ b/vendor/github.com/containers/common/pkg/report/template.go
@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/json"
"reflect"
+ "regexp"
"strings"
"text/template"
@@ -155,3 +156,18 @@ func (t *Template) Funcs(funcMap FuncMap) *Template {
func (t *Template) IsTable() bool {
return t.isTable
}
+
+var rangeRegex = regexp.MustCompile(`{{\s*range\s*\.\s*}}.*{{\s*end\s*}}`)
+
+// EnforceRange ensures that the format string contains a range
+func EnforceRange(format string) string {
+ if !rangeRegex.MatchString(format) {
+ return "{{range .}}" + format + "{{end}}"
+ }
+ return format
+}
+
+// HasTable returns whether the format is a table
+func HasTable(format string) bool {
+ return strings.HasPrefix(format, "table ")
+}
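
The added `EnforceRange` and `HasTable` helpers operate on user-supplied Go template format strings. A small sketch of how a caller might use `EnforceRange`, assuming an invented row type and format string (the template wiring is illustrative, not this package's own rendering path):

```go
// Illustrative driver for report.EnforceRange; the row type and format
// string are made up for this sketch.
package main

import (
	"os"
	"text/template"

	"github.com/containers/common/pkg/report"
)

type row struct{ Name, Version string }

func main() {
	// EnforceRange wraps the format in {{range .}}...{{end}} unless it
	// already iterates, so it can be executed against a slice.
	// report.HasTable(format) reports whether a format begins with "table ".
	format := report.EnforceRange("{{.Name}} {{.Version}}\n")

	tmpl := template.Must(template.New("report").Parse(format))
	rows := []row{{"buildah", "1.19.6"}, {"common", "0.35.0"}}
	if err := tmpl.Execute(os.Stdout, rows); err != nil {
		panic(err)
	}
}
```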
diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go
index 8efc8b8a2..ff95a6522 100644
--- a/vendor/github.com/containers/common/version/version.go
+++ b/vendor/github.com/containers/common/version/version.go
@@ -1,4 +1,4 @@
package version
// Version is the version of the build.
-const Version = "0.34.3-dev"
+const Version = "0.35.0"
diff --git a/vendor/github.com/docker/spdystream/CONTRIBUTING.md b/vendor/github.com/docker/spdystream/CONTRIBUTING.md
deleted file mode 100644
index d4eddcc53..000000000
--- a/vendor/github.com/docker/spdystream/CONTRIBUTING.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Contributing to SpdyStream
-
-Want to hack on spdystream? Awesome! Here are instructions to get you
-started.
-
-SpdyStream is a part of the [Docker](https://docker.io) project, and follows
-the same rules and principles. If you're already familiar with the way
-Docker does things, you'll feel right at home.
-
-Otherwise, go read
-[Docker's contributions guidelines](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md).
-
-Happy hacking!
diff --git a/vendor/github.com/docker/spdystream/LICENSE.docs b/vendor/github.com/docker/spdystream/LICENSE.docs
deleted file mode 100644
index e26cd4fc8..000000000
--- a/vendor/github.com/docker/spdystream/LICENSE.docs
+++ /dev/null
@@ -1,425 +0,0 @@
-Attribution-ShareAlike 4.0 International
-
-=======================================================================
-
-Creative Commons Corporation ("Creative Commons") is not a law firm and
-does not provide legal services or legal advice. Distribution of
-Creative Commons public licenses does not create a lawyer-client or
-other relationship. Creative Commons makes its licenses and related
-information available on an "as-is" basis. Creative Commons gives no
-warranties regarding its licenses, any material licensed under their
-terms and conditions, or any related information. Creative Commons
-disclaims all liability for damages resulting from their use to the
-fullest extent possible.
-
-Using Creative Commons Public Licenses
-
-Creative Commons public licenses provide a standard set of terms and
-conditions that creators and other rights holders may use to share
-original works of authorship and other material subject to copyright
-and certain other rights specified in the public license below. The
-following considerations are for informational purposes only, are not
-exhaustive, and do not form part of our licenses.
-
- Considerations for licensors: Our public licenses are
- intended for use by those authorized to give the public
- permission to use material in ways otherwise restricted by
- copyright and certain other rights. Our licenses are
- irrevocable. Licensors should read and understand the terms
- and conditions of the license they choose before applying it.
- Licensors should also secure all rights necessary before
- applying our licenses so that the public can reuse the
- material as expected. Licensors should clearly mark any
- material not subject to the license. This includes other CC-
- licensed material, or material used under an exception or
- limitation to copyright. More considerations for licensors:
- wiki.creativecommons.org/Considerations_for_licensors
-
- Considerations for the public: By using one of our public
- licenses, a licensor grants the public permission to use the
- licensed material under specified terms and conditions. If
- the licensor's permission is not necessary for any reason--for
- example, because of any applicable exception or limitation to
- copyright--then that use is not regulated by the license. Our
- licenses grant only permissions under copyright and certain
- other rights that a licensor has authority to grant. Use of
- the licensed material may still be restricted for other
- reasons, including because others have copyright or other
- rights in the material. A licensor may make special requests,
- such as asking that all changes be marked or described.
- Although not required by our licenses, you are encouraged to
- respect those requests where reasonable. More_considerations
- for the public:
- wiki.creativecommons.org/Considerations_for_licensees
-
-=======================================================================
-
-Creative Commons Attribution-ShareAlike 4.0 International Public
-License
-
-By exercising the Licensed Rights (defined below), You accept and agree
-to be bound by the terms and conditions of this Creative Commons
-Attribution-ShareAlike 4.0 International Public License ("Public
-License"). To the extent this Public License may be interpreted as a
-contract, You are granted the Licensed Rights in consideration of Your
-acceptance of these terms and conditions, and the Licensor grants You
-such rights in consideration of benefits the Licensor receives from
-making the Licensed Material available under these terms and
-conditions.
-
-
-Section 1 -- Definitions.
-
- a. Adapted Material means material subject to Copyright and Similar
- Rights that is derived from or based upon the Licensed Material
- and in which the Licensed Material is translated, altered,
- arranged, transformed, or otherwise modified in a manner requiring
- permission under the Copyright and Similar Rights held by the
- Licensor. For purposes of this Public License, where the Licensed
- Material is a musical work, performance, or sound recording,
- Adapted Material is always produced where the Licensed Material is
- synched in timed relation with a moving image.
-
- b. Adapter's License means the license You apply to Your Copyright
- and Similar Rights in Your contributions to Adapted Material in
- accordance with the terms and conditions of this Public License.
-
- c. BY-SA Compatible License means a license listed at
- creativecommons.org/compatiblelicenses, approved by Creative
- Commons as essentially the equivalent of this Public License.
-
- d. Copyright and Similar Rights means copyright and/or similar rights
- closely related to copyright including, without limitation,
- performance, broadcast, sound recording, and Sui Generis Database
- Rights, without regard to how the rights are labeled or
- categorized. For purposes of this Public License, the rights
- specified in Section 2(b)(1)-(2) are not Copyright and Similar
- Rights.
-
- e. Effective Technological Measures means those measures that, in the
- absence of proper authority, may not be circumvented under laws
- fulfilling obligations under Article 11 of the WIPO Copyright
- Treaty adopted on December 20, 1996, and/or similar international
- agreements.
-
- f. Exceptions and Limitations means fair use, fair dealing, and/or
- any other exception or limitation to Copyright and Similar Rights
- that applies to Your use of the Licensed Material.
-
- g. License Elements means the license attributes listed in the name
- of a Creative Commons Public License. The License Elements of this
- Public License are Attribution and ShareAlike.
-
- h. Licensed Material means the artistic or literary work, database,
- or other material to which the Licensor applied this Public
- License.
-
- i. Licensed Rights means the rights granted to You subject to the
- terms and conditions of this Public License, which are limited to
- all Copyright and Similar Rights that apply to Your use of the
- Licensed Material and that the Licensor has authority to license.
-
- j. Licensor means the individual(s) or entity(ies) granting rights
- under this Public License.
-
- k. Share means to provide material to the public by any means or
- process that requires permission under the Licensed Rights, such
- as reproduction, public display, public performance, distribution,
- dissemination, communication, or importation, and to make material
- available to the public including in ways that members of the
- public may access the material from a place and at a time
- individually chosen by them.
-
- l. Sui Generis Database Rights means rights other than copyright
- resulting from Directive 96/9/EC of the European Parliament and of
- the Council of 11 March 1996 on the legal protection of databases,
- as amended and/or succeeded, as well as other essentially
- equivalent rights anywhere in the world.
-
- m. You means the individual or entity exercising the Licensed Rights
- under this Public License. Your has a corresponding meaning.
-
-
-Section 2 -- Scope.
-
- a. License grant.
-
- 1. Subject to the terms and conditions of this Public License,
- the Licensor hereby grants You a worldwide, royalty-free,
- non-sublicensable, non-exclusive, irrevocable license to
- exercise the Licensed Rights in the Licensed Material to:
-
- a. reproduce and Share the Licensed Material, in whole or
- in part; and
-
- b. produce, reproduce, and Share Adapted Material.
-
- 2. Exceptions and Limitations. For the avoidance of doubt, where
- Exceptions and Limitations apply to Your use, this Public
- License does not apply, and You do not need to comply with
- its terms and conditions.
-
- 3. Term. The term of this Public License is specified in Section
- 6(a).
-
- 4. Media and formats; technical modifications allowed. The
- Licensor authorizes You to exercise the Licensed Rights in
- all media and formats whether now known or hereafter created,
- and to make technical modifications necessary to do so. The
- Licensor waives and/or agrees not to assert any right or
- authority to forbid You from making technical modifications
- necessary to exercise the Licensed Rights, including
- technical modifications necessary to circumvent Effective
- Technological Measures. For purposes of this Public License,
- simply making modifications authorized by this Section 2(a)
- (4) never produces Adapted Material.
-
- 5. Downstream recipients.
-
- a. Offer from the Licensor -- Licensed Material. Every
- recipient of the Licensed Material automatically
- receives an offer from the Licensor to exercise the
- Licensed Rights under the terms and conditions of this
- Public License.
-
- b. Additional offer from the Licensor -- Adapted Material.
- Every recipient of Adapted Material from You
- automatically receives an offer from the Licensor to
- exercise the Licensed Rights in the Adapted Material
- under the conditions of the Adapter's License You apply.
-
- c. No downstream restrictions. You may not offer or impose
- any additional or different terms or conditions on, or
- apply any Effective Technological Measures to, the
- Licensed Material if doing so restricts exercise of the
- Licensed Rights by any recipient of the Licensed
- Material.
-
- 6. No endorsement. Nothing in this Public License constitutes or
- may be construed as permission to assert or imply that You
- are, or that Your use of the Licensed Material is, connected
- with, or sponsored, endorsed, or granted official status by,
- the Licensor or others designated to receive attribution as
- provided in Section 3(a)(1)(A)(i).
-
- b. Other rights.
-
- 1. Moral rights, such as the right of integrity, are not
- licensed under this Public License, nor are publicity,
- privacy, and/or other similar personality rights; however, to
- the extent possible, the Licensor waives and/or agrees not to
- assert any such rights held by the Licensor to the limited
- extent necessary to allow You to exercise the Licensed
- Rights, but not otherwise.
-
- 2. Patent and trademark rights are not licensed under this
- Public License.
-
- 3. To the extent possible, the Licensor waives any right to
- collect royalties from You for the exercise of the Licensed
- Rights, whether directly or through a collecting society
- under any voluntary or waivable statutory or compulsory
- licensing scheme. In all other cases the Licensor expressly
- reserves any right to collect such royalties.
-
-
-Section 3 -- License Conditions.
-
-Your exercise of the Licensed Rights is expressly made subject to the
-following conditions.
-
- a. Attribution.
-
- 1. If You Share the Licensed Material (including in modified
- form), You must:
-
- a. retain the following if it is supplied by the Licensor
- with the Licensed Material:
-
- i. identification of the creator(s) of the Licensed
- Material and any others designated to receive
- attribution, in any reasonable manner requested by
- the Licensor (including by pseudonym if
- designated);
-
- ii. a copyright notice;
-
- iii. a notice that refers to this Public License;
-
- iv. a notice that refers to the disclaimer of
- warranties;
-
- v. a URI or hyperlink to the Licensed Material to the
- extent reasonably practicable;
-
- b. indicate if You modified the Licensed Material and
- retain an indication of any previous modifications; and
-
- c. indicate the Licensed Material is licensed under this
- Public License, and include the text of, or the URI or
- hyperlink to, this Public License.
-
- 2. You may satisfy the conditions in Section 3(a)(1) in any
- reasonable manner based on the medium, means, and context in
- which You Share the Licensed Material. For example, it may be
- reasonable to satisfy the conditions by providing a URI or
- hyperlink to a resource that includes the required
- information.
-
- 3. If requested by the Licensor, You must remove any of the
- information required by Section 3(a)(1)(A) to the extent
- reasonably practicable.
-
- b. ShareAlike.
-
- In addition to the conditions in Section 3(a), if You Share
- Adapted Material You produce, the following conditions also apply.
-
- 1. The Adapter's License You apply must be a Creative Commons
- license with the same License Elements, this version or
- later, or a BY-SA Compatible License.
-
- 2. You must include the text of, or the URI or hyperlink to, the
- Adapter's License You apply. You may satisfy this condition
- in any reasonable manner based on the medium, means, and
- context in which You Share Adapted Material.
-
- 3. You may not offer or impose any additional or different terms
- or conditions on, or apply any Effective Technological
- Measures to, Adapted Material that restrict exercise of the
- rights granted under the Adapter's License You apply.
-
-
-Section 4 -- Sui Generis Database Rights.
-
-Where the Licensed Rights include Sui Generis Database Rights that
-apply to Your use of the Licensed Material:
-
- a. for the avoidance of doubt, Section 2(a)(1) grants You the right
- to extract, reuse, reproduce, and Share all or a substantial
- portion of the contents of the database;
-
- b. if You include all or a substantial portion of the database
- contents in a database in which You have Sui Generis Database
- Rights, then the database in which You have Sui Generis Database
- Rights (but not its individual contents) is Adapted Material,
-
- including for purposes of Section 3(b); and
- c. You must comply with the conditions in Section 3(a) if You Share
- all or a substantial portion of the contents of the database.
-
-For the avoidance of doubt, this Section 4 supplements and does not
-replace Your obligations under this Public License where the Licensed
-Rights include other Copyright and Similar Rights.
-
-
-Section 5 -- Disclaimer of Warranties and Limitation of Liability.
-
- a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
- EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
- AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
- ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
- IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
- WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
- PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
- ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
- KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
- ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
-
- b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
- TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
- NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
- INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
- COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
- USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
- ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
- DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
- IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
-
- c. The disclaimer of warranties and limitation of liability provided
- above shall be interpreted in a manner that, to the extent
- possible, most closely approximates an absolute disclaimer and
- waiver of all liability.
-
-
-Section 6 -- Term and Termination.
-
- a. This Public License applies for the term of the Copyright and
- Similar Rights licensed here. However, if You fail to comply with
- this Public License, then Your rights under this Public License
- terminate automatically.
-
- b. Where Your right to use the Licensed Material has terminated under
- Section 6(a), it reinstates:
-
- 1. automatically as of the date the violation is cured, provided
- it is cured within 30 days of Your discovery of the
- violation; or
-
- 2. upon express reinstatement by the Licensor.
-
- For the avoidance of doubt, this Section 6(b) does not affect any
- right the Licensor may have to seek remedies for Your violations
- of this Public License.
-
- c. For the avoidance of doubt, the Licensor may also offer the
- Licensed Material under separate terms or conditions or stop
- distributing the Licensed Material at any time; however, doing so
- will not terminate this Public License.
-
- d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
- License.
-
-
-Section 7 -- Other Terms and Conditions.
-
- a. The Licensor shall not be bound by any additional or different
- terms or conditions communicated by You unless expressly agreed.
-
- b. Any arrangements, understandings, or agreements regarding the
- Licensed Material not stated herein are separate from and
- independent of the terms and conditions of this Public License.
-
-
-Section 8 -- Interpretation.
-
- a. For the avoidance of doubt, this Public License does not, and
- shall not be interpreted to, reduce, limit, restrict, or impose
- conditions on any use of the Licensed Material that could lawfully
- be made without permission under this Public License.
-
- b. To the extent possible, if any provision of this Public License is
- deemed unenforceable, it shall be automatically reformed to the
- minimum extent necessary to make it enforceable. If the provision
- cannot be reformed, it shall be severed from this Public License
- without affecting the enforceability of the remaining terms and
- conditions.
-
- c. No term or condition of this Public License will be waived and no
- failure to comply consented to unless expressly agreed to by the
- Licensor.
-
- d. Nothing in this Public License constitutes or may be interpreted
- as a limitation upon, or waiver of, any privileges and immunities
- that apply to the Licensor or You, including from the legal
- processes of any jurisdiction or authority.
-
-
-=======================================================================
-
-Creative Commons is not a party to its public licenses.
-Notwithstanding, Creative Commons may elect to apply one of its public
-licenses to material it publishes and in those instances will be
-considered the "Licensor." Except for the limited purpose of indicating
-that material is shared under a Creative Commons public license or as
-otherwise permitted by the Creative Commons policies published at
-creativecommons.org/policies, Creative Commons does not authorize the
-use of the trademark "Creative Commons" or any other trademark or logo
-of Creative Commons without its prior written consent including,
-without limitation, in connection with any unauthorized modifications
-to any of its public licenses or any other arrangements,
-understandings, or agreements concerning use of licensed material. For
-the avoidance of doubt, this paragraph does not form part of the public
-licenses.
-
-Creative Commons may be contacted at creativecommons.org.
diff --git a/vendor/github.com/docker/spdystream/MAINTAINERS b/vendor/github.com/docker/spdystream/MAINTAINERS
deleted file mode 100644
index 14e263325..000000000
--- a/vendor/github.com/docker/spdystream/MAINTAINERS
+++ /dev/null
@@ -1,28 +0,0 @@
-# Spdystream maintainers file
-#
-# This file describes who runs the docker/spdystream project and how.
-# This is a living document - if you see something out of date or missing, speak up!
-#
-# It is structured to be consumable by both humans and programs.
-# To extract its contents programmatically, use any TOML-compliant parser.
-#
-# This file is compiled into the MAINTAINERS file in docker/opensource.
-#
-[Org]
- [Org."Core maintainers"]
- people = [
- "dmcgowan",
- ]
-
-[people]
-
-# A reference list of all people associated with the project.
-# All other sections should refer to people by their canonical key
-# in the people section.
-
- # ADD YOURSELF HERE IN ALPHABETICAL ORDER
-
- [people.dmcgowan]
- Name = "Derek McGowan"
- Email = "derek@docker.com"
- GitHub = "dmcgowan"
diff --git a/vendor/github.com/docker/spdystream/README.md b/vendor/github.com/docker/spdystream/README.md
deleted file mode 100644
index 11cccd0a0..000000000
--- a/vendor/github.com/docker/spdystream/README.md
+++ /dev/null
@@ -1,77 +0,0 @@
-# SpdyStream
-
-A multiplexed stream library using spdy
-
-## Usage
-
-Client example (connecting to mirroring server without auth)
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/docker/spdystream"
- "net"
- "net/http"
-)
-
-func main() {
- conn, err := net.Dial("tcp", "localhost:8080")
- if err != nil {
- panic(err)
- }
- spdyConn, err := spdystream.NewConnection(conn, false)
- if err != nil {
- panic(err)
- }
- go spdyConn.Serve(spdystream.NoOpStreamHandler)
- stream, err := spdyConn.CreateStream(http.Header{}, nil, false)
- if err != nil {
- panic(err)
- }
-
- stream.Wait()
-
- fmt.Fprint(stream, "Writing to stream")
-
- buf := make([]byte, 25)
- stream.Read(buf)
- fmt.Println(string(buf))
-
- stream.Close()
-}
-```
-
-Server example (mirroring server without auth)
-
-```go
-package main
-
-import (
- "github.com/docker/spdystream"
- "net"
-)
-
-func main() {
- listener, err := net.Listen("tcp", "localhost:8080")
- if err != nil {
- panic(err)
- }
- for {
- conn, err := listener.Accept()
- if err != nil {
- panic(err)
- }
- spdyConn, err := spdystream.NewConnection(conn, true)
- if err != nil {
- panic(err)
- }
- go spdyConn.Serve(spdystream.MirrorStreamHandler)
- }
-}
-```
-
-## Copyright and license
-
-Copyright © 2014-2015 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.
diff --git a/vendor/github.com/docker/spdystream/connection.go b/vendor/github.com/docker/spdystream/connection.go
deleted file mode 100644
index 6031a0db1..000000000
--- a/vendor/github.com/docker/spdystream/connection.go
+++ /dev/null
@@ -1,958 +0,0 @@
-package spdystream
-
-import (
- "errors"
- "fmt"
- "io"
- "net"
- "net/http"
- "sync"
- "time"
-
- "github.com/docker/spdystream/spdy"
-)
-
-var (
- ErrInvalidStreamId = errors.New("Invalid stream id")
- ErrTimeout = errors.New("Timeout occured")
- ErrReset = errors.New("Stream reset")
- ErrWriteClosedStream = errors.New("Write on closed stream")
-)
-
-const (
- FRAME_WORKERS = 5
- QUEUE_SIZE = 50
-)
-
-type StreamHandler func(stream *Stream)
-
-type AuthHandler func(header http.Header, slot uint8, parent uint32) bool
-
-type idleAwareFramer struct {
- f *spdy.Framer
- conn *Connection
- writeLock sync.Mutex
- resetChan chan struct{}
- setTimeoutLock sync.Mutex
- setTimeoutChan chan time.Duration
- timeout time.Duration
-}
-
-func newIdleAwareFramer(framer *spdy.Framer) *idleAwareFramer {
- iaf := &idleAwareFramer{
- f: framer,
- resetChan: make(chan struct{}, 2),
- // setTimeoutChan needs to be buffered to avoid deadlocks when calling setIdleTimeout at about
- // the same time the connection is being closed
- setTimeoutChan: make(chan time.Duration, 1),
- }
- return iaf
-}
-
-func (i *idleAwareFramer) monitor() {
- var (
- timer *time.Timer
- expired <-chan time.Time
- resetChan = i.resetChan
- setTimeoutChan = i.setTimeoutChan
- )
-Loop:
- for {
- select {
- case timeout := <-i.setTimeoutChan:
- i.timeout = timeout
- if timeout == 0 {
- if timer != nil {
- timer.Stop()
- }
- } else {
- if timer == nil {
- timer = time.NewTimer(timeout)
- expired = timer.C
- } else {
- timer.Reset(timeout)
- }
- }
- case <-resetChan:
- if timer != nil && i.timeout > 0 {
- timer.Reset(i.timeout)
- }
- case <-expired:
- i.conn.streamCond.L.Lock()
- streams := i.conn.streams
- i.conn.streams = make(map[spdy.StreamId]*Stream)
- i.conn.streamCond.Broadcast()
- i.conn.streamCond.L.Unlock()
- go func() {
- for _, stream := range streams {
- stream.resetStream()
- }
- i.conn.Close()
- }()
- case <-i.conn.closeChan:
- if timer != nil {
- timer.Stop()
- }
-
- // Start a goroutine to drain resetChan. This is needed because we've seen
- // some unit tests with large numbers of goroutines get into a situation
- // where resetChan fills up, at least 1 call to Write() is still trying to
- // send to resetChan, the connection gets closed, and this case statement
- // attempts to grab the write lock that Write() already has, causing a
- // deadlock.
- //
- // See https://github.com/docker/spdystream/issues/49 for more details.
- go func() {
- for _ = range resetChan {
- }
- }()
-
- go func() {
- for _ = range setTimeoutChan {
- }
- }()
-
- i.writeLock.Lock()
- close(resetChan)
- i.resetChan = nil
- i.writeLock.Unlock()
-
- i.setTimeoutLock.Lock()
- close(i.setTimeoutChan)
- i.setTimeoutChan = nil
- i.setTimeoutLock.Unlock()
-
- break Loop
- }
- }
-
- // Drain resetChan
- for _ = range resetChan {
- }
-}
-
-func (i *idleAwareFramer) WriteFrame(frame spdy.Frame) error {
- i.writeLock.Lock()
- defer i.writeLock.Unlock()
- if i.resetChan == nil {
- return io.EOF
- }
- err := i.f.WriteFrame(frame)
- if err != nil {
- return err
- }
-
- i.resetChan <- struct{}{}
-
- return nil
-}
-
-func (i *idleAwareFramer) ReadFrame() (spdy.Frame, error) {
- frame, err := i.f.ReadFrame()
- if err != nil {
- return nil, err
- }
-
- // resetChan should never be closed since it is only closed
- // when the connection has closed its closeChan. This closure
- // only occurs after all Reads have finished
- // TODO (dmcgowan): refactor relationship into connection
- i.resetChan <- struct{}{}
-
- return frame, nil
-}
-
-func (i *idleAwareFramer) setIdleTimeout(timeout time.Duration) {
- i.setTimeoutLock.Lock()
- defer i.setTimeoutLock.Unlock()
-
- if i.setTimeoutChan == nil {
- return
- }
-
- i.setTimeoutChan <- timeout
-}
-
-type Connection struct {
- conn net.Conn
- framer *idleAwareFramer
-
- closeChan chan bool
- goneAway bool
- lastStreamChan chan<- *Stream
- goAwayTimeout time.Duration
- closeTimeout time.Duration
-
- streamLock *sync.RWMutex
- streamCond *sync.Cond
- streams map[spdy.StreamId]*Stream
-
- nextIdLock sync.Mutex
- receiveIdLock sync.Mutex
- nextStreamId spdy.StreamId
- receivedStreamId spdy.StreamId
-
- pingIdLock sync.Mutex
- pingId uint32
- pingChans map[uint32]chan error
-
- shutdownLock sync.Mutex
- shutdownChan chan error
- hasShutdown bool
-
- // for testing https://github.com/docker/spdystream/pull/56
- dataFrameHandler func(*spdy.DataFrame) error
-}
-
-// NewConnection creates a new spdy connection from an existing
-// network connection.
-func NewConnection(conn net.Conn, server bool) (*Connection, error) {
- framer, framerErr := spdy.NewFramer(conn, conn)
- if framerErr != nil {
- return nil, framerErr
- }
- idleAwareFramer := newIdleAwareFramer(framer)
- var sid spdy.StreamId
- var rid spdy.StreamId
- var pid uint32
- if server {
- sid = 2
- rid = 1
- pid = 2
- } else {
- sid = 1
- rid = 2
- pid = 1
- }
-
- streamLock := new(sync.RWMutex)
- streamCond := sync.NewCond(streamLock)
-
- session := &Connection{
- conn: conn,
- framer: idleAwareFramer,
-
- closeChan: make(chan bool),
- goAwayTimeout: time.Duration(0),
- closeTimeout: time.Duration(0),
-
- streamLock: streamLock,
- streamCond: streamCond,
- streams: make(map[spdy.StreamId]*Stream),
- nextStreamId: sid,
- receivedStreamId: rid,
-
- pingId: pid,
- pingChans: make(map[uint32]chan error),
-
- shutdownChan: make(chan error),
- }
- session.dataFrameHandler = session.handleDataFrame
- idleAwareFramer.conn = session
- go idleAwareFramer.monitor()
-
- return session, nil
-}
-
-// Ping sends a ping frame across the connection and
-// returns the response time
-func (s *Connection) Ping() (time.Duration, error) {
- pid := s.pingId
- s.pingIdLock.Lock()
- if s.pingId > 0x7ffffffe {
- s.pingId = s.pingId - 0x7ffffffe
- } else {
- s.pingId = s.pingId + 2
- }
- s.pingIdLock.Unlock()
- pingChan := make(chan error)
- s.pingChans[pid] = pingChan
- defer delete(s.pingChans, pid)
-
- frame := &spdy.PingFrame{Id: pid}
- startTime := time.Now()
- writeErr := s.framer.WriteFrame(frame)
- if writeErr != nil {
- return time.Duration(0), writeErr
- }
- select {
- case <-s.closeChan:
- return time.Duration(0), errors.New("connection closed")
- case err, ok := <-pingChan:
- if ok && err != nil {
- return time.Duration(0), err
- }
- break
- }
- return time.Now().Sub(startTime), nil
-}
-
-// Serve handles frames sent from the server, including reply frames
-// which are needed to fully initiate connections. Both clients and servers
-// should call Serve in a separate goroutine before creating streams.
-func (s *Connection) Serve(newHandler StreamHandler) {
- // use a WaitGroup to wait for all frames to be drained after receiving
- // go-away.
- var wg sync.WaitGroup
-
- // Parition queues to ensure stream frames are handled
- // by the same worker, ensuring order is maintained
- frameQueues := make([]*PriorityFrameQueue, FRAME_WORKERS)
- for i := 0; i < FRAME_WORKERS; i++ {
- frameQueues[i] = NewPriorityFrameQueue(QUEUE_SIZE)
-
- // Ensure frame queue is drained when connection is closed
- go func(frameQueue *PriorityFrameQueue) {
- <-s.closeChan
- frameQueue.Drain()
- }(frameQueues[i])
-
- wg.Add(1)
- go func(frameQueue *PriorityFrameQueue) {
- // let the WaitGroup know this worker is done
- defer wg.Done()
-
- s.frameHandler(frameQueue, newHandler)
- }(frameQueues[i])
- }
-
- var (
- partitionRoundRobin int
- goAwayFrame *spdy.GoAwayFrame
- )
-Loop:
- for {
- readFrame, err := s.framer.ReadFrame()
- if err != nil {
- if err != io.EOF {
- fmt.Errorf("frame read error: %s", err)
- } else {
- debugMessage("(%p) EOF received", s)
- }
- break
- }
- var priority uint8
- var partition int
- switch frame := readFrame.(type) {
- case *spdy.SynStreamFrame:
- if s.checkStreamFrame(frame) {
- priority = frame.Priority
- partition = int(frame.StreamId % FRAME_WORKERS)
- debugMessage("(%p) Add stream frame: %d ", s, frame.StreamId)
- s.addStreamFrame(frame)
- } else {
- debugMessage("(%p) Rejected stream frame: %d ", s, frame.StreamId)
- continue
- }
- case *spdy.SynReplyFrame:
- priority = s.getStreamPriority(frame.StreamId)
- partition = int(frame.StreamId % FRAME_WORKERS)
- case *spdy.DataFrame:
- priority = s.getStreamPriority(frame.StreamId)
- partition = int(frame.StreamId % FRAME_WORKERS)
- case *spdy.RstStreamFrame:
- priority = s.getStreamPriority(frame.StreamId)
- partition = int(frame.StreamId % FRAME_WORKERS)
- case *spdy.HeadersFrame:
- priority = s.getStreamPriority(frame.StreamId)
- partition = int(frame.StreamId % FRAME_WORKERS)
- case *spdy.PingFrame:
- priority = 0
- partition = partitionRoundRobin
- partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
- case *spdy.GoAwayFrame:
- // hold on to the go away frame and exit the loop
- goAwayFrame = frame
- break Loop
- default:
- priority = 7
- partition = partitionRoundRobin
- partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
- }
- frameQueues[partition].Push(readFrame, priority)
- }
- close(s.closeChan)
-
- // wait for all frame handler workers to indicate they've drained their queues
- // before handling the go away frame
- wg.Wait()
-
- if goAwayFrame != nil {
- s.handleGoAwayFrame(goAwayFrame)
- }
-
- // now it's safe to close remote channels and empty s.streams
- s.streamCond.L.Lock()
- // notify streams that they're now closed, which will
- // unblock any stream Read() calls
- for _, stream := range s.streams {
- stream.closeRemoteChannels()
- }
- s.streams = make(map[spdy.StreamId]*Stream)
- s.streamCond.Broadcast()
- s.streamCond.L.Unlock()
-}
-
-func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler StreamHandler) {
- for {
- popFrame := frameQueue.Pop()
- if popFrame == nil {
- return
- }
-
- var frameErr error
- switch frame := popFrame.(type) {
- case *spdy.SynStreamFrame:
- frameErr = s.handleStreamFrame(frame, newHandler)
- case *spdy.SynReplyFrame:
- frameErr = s.handleReplyFrame(frame)
- case *spdy.DataFrame:
- frameErr = s.dataFrameHandler(frame)
- case *spdy.RstStreamFrame:
- frameErr = s.handleResetFrame(frame)
- case *spdy.HeadersFrame:
- frameErr = s.handleHeaderFrame(frame)
- case *spdy.PingFrame:
- frameErr = s.handlePingFrame(frame)
- case *spdy.GoAwayFrame:
- frameErr = s.handleGoAwayFrame(frame)
- default:
- frameErr = fmt.Errorf("unhandled frame type: %T", frame)
- }
-
- if frameErr != nil {
- debugMessage("(%p) Frame handling error: %s", s, frameErr)
- }
- }
-}
-
-func (s *Connection) getStreamPriority(streamId spdy.StreamId) uint8 {
- stream, streamOk := s.getStream(streamId)
- if !streamOk {
- return 7
- }
- return stream.priority
-}
-
-func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) {
- var parent *Stream
- if frame.AssociatedToStreamId != spdy.StreamId(0) {
- parent, _ = s.getStream(frame.AssociatedToStreamId)
- }
-
- stream := &Stream{
- streamId: frame.StreamId,
- parent: parent,
- conn: s,
- startChan: make(chan error),
- headers: frame.Headers,
- finished: (frame.CFHeader.Flags & spdy.ControlFlagUnidirectional) != 0x00,
- replyCond: sync.NewCond(new(sync.Mutex)),
- dataChan: make(chan []byte),
- headerChan: make(chan http.Header),
- closeChan: make(chan bool),
- }
- if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 {
- stream.closeRemoteChannels()
- }
-
- s.addStream(stream)
-}
-
-// checkStreamFrame checks to see if a stream frame is allowed.
-// If the stream is invalid, a reset frame with a protocol error
-// will be sent and false will be returned.
-func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool {
- s.receiveIdLock.Lock()
- defer s.receiveIdLock.Unlock()
- if s.goneAway {
- return false
- }
- validationErr := s.validateStreamId(frame.StreamId)
- if validationErr != nil {
- go func() {
- resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId)
- if resetErr != nil {
- debugMessage("(%p) Reset error: %s", s, resetErr)
- }
- }()
- return false
- }
- return true
-}
-
-func (s *Connection) handleStreamFrame(frame *spdy.SynStreamFrame, newHandler StreamHandler) error {
- stream, ok := s.getStream(frame.StreamId)
- if !ok {
- return fmt.Errorf("Missing stream: %d", frame.StreamId)
- }
-
- newHandler(stream)
-
- return nil
-}
-
-func (s *Connection) handleReplyFrame(frame *spdy.SynReplyFrame) error {
- debugMessage("(%p) Reply frame received for %d", s, frame.StreamId)
- stream, streamOk := s.getStream(frame.StreamId)
- if !streamOk {
- debugMessage("Reply frame gone away for %d", frame.StreamId)
- // Stream has already gone away
- return nil
- }
- if stream.replied {
- // Stream has already received reply
- return nil
- }
- stream.replied = true
-
- // TODO Check for error
- if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
- s.remoteStreamFinish(stream)
- }
-
- close(stream.startChan)
-
- return nil
-}
-
-func (s *Connection) handleResetFrame(frame *spdy.RstStreamFrame) error {
- stream, streamOk := s.getStream(frame.StreamId)
- if !streamOk {
- // Stream has already been removed
- return nil
- }
- s.removeStream(stream)
- stream.closeRemoteChannels()
-
- if !stream.replied {
- stream.replied = true
- stream.startChan <- ErrReset
- close(stream.startChan)
- }
-
- stream.finishLock.Lock()
- stream.finished = true
- stream.finishLock.Unlock()
-
- return nil
-}
-
-func (s *Connection) handleHeaderFrame(frame *spdy.HeadersFrame) error {
- stream, streamOk := s.getStream(frame.StreamId)
- if !streamOk {
- // Stream has already gone away
- return nil
- }
- if !stream.replied {
- // No reply received...Protocol error?
- return nil
- }
-
- // TODO limit headers while not blocking (use buffered chan or goroutine?)
- select {
- case <-stream.closeChan:
- return nil
- case stream.headerChan <- frame.Headers:
- }
-
- if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
- s.remoteStreamFinish(stream)
- }
-
- return nil
-}
-
-func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error {
- debugMessage("(%p) Data frame received for %d", s, frame.StreamId)
- stream, streamOk := s.getStream(frame.StreamId)
- if !streamOk {
- debugMessage("(%p) Data frame gone away for %d", s, frame.StreamId)
- // Stream has already gone away
- return nil
- }
- if !stream.replied {
- debugMessage("(%p) Data frame not replied %d", s, frame.StreamId)
- // No reply received...Protocol error?
- return nil
- }
-
- debugMessage("(%p) (%d) Data frame handling", stream, stream.streamId)
- if len(frame.Data) > 0 {
- stream.dataLock.RLock()
- select {
- case <-stream.closeChan:
- debugMessage("(%p) (%d) Data frame not sent (stream shut down)", stream, stream.streamId)
- case stream.dataChan <- frame.Data:
- debugMessage("(%p) (%d) Data frame sent", stream, stream.streamId)
- }
- stream.dataLock.RUnlock()
- }
- if (frame.Flags & spdy.DataFlagFin) != 0x00 {
- s.remoteStreamFinish(stream)
- }
- return nil
-}
-
-func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error {
- if s.pingId&0x01 != frame.Id&0x01 {
- return s.framer.WriteFrame(frame)
- }
- pingChan, pingOk := s.pingChans[frame.Id]
- if pingOk {
- close(pingChan)
- }
- return nil
-}
-
-func (s *Connection) handleGoAwayFrame(frame *spdy.GoAwayFrame) error {
- debugMessage("(%p) Go away received", s)
- s.receiveIdLock.Lock()
- if s.goneAway {
- s.receiveIdLock.Unlock()
- return nil
- }
- s.goneAway = true
- s.receiveIdLock.Unlock()
-
- if s.lastStreamChan != nil {
- stream, _ := s.getStream(frame.LastGoodStreamId)
- go func() {
- s.lastStreamChan <- stream
- }()
- }
-
- // Do not block frame handler waiting for closure
- go s.shutdown(s.goAwayTimeout)
-
- return nil
-}
-
-func (s *Connection) remoteStreamFinish(stream *Stream) {
- stream.closeRemoteChannels()
-
- stream.finishLock.Lock()
- if stream.finished {
- // Stream is fully closed, cleanup
- s.removeStream(stream)
- }
- stream.finishLock.Unlock()
-}
-
-// CreateStream creates a new spdy stream using the parameters for
-// creating the stream frame. The stream frame will be sent upon
-// calling this function; however, this function does not wait for
-// the reply frame. If waiting for the reply is desired, use
-// the stream Wait or WaitTimeout function on the stream returned
-// by this function.
-func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) {
- // MUST synchronize stream creation (all the way to writing the frame)
- // as stream IDs **MUST** increase monotonically.
- s.nextIdLock.Lock()
- defer s.nextIdLock.Unlock()
-
- streamId := s.getNextStreamId()
- if streamId == 0 {
- return nil, fmt.Errorf("Unable to get new stream id")
- }
-
- stream := &Stream{
- streamId: streamId,
- parent: parent,
- conn: s,
- startChan: make(chan error),
- headers: headers,
- dataChan: make(chan []byte),
- headerChan: make(chan http.Header),
- closeChan: make(chan bool),
- }
-
- debugMessage("(%p) (%p) Create stream", s, stream)
-
- s.addStream(stream)
-
- return stream, s.sendStream(stream, fin)
-}
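A sketch of opening a stream with CreateStream and blocking on the reply via the stream's Wait method; the header value is hypothetical and the connection is assumed to already have Serve running:

package example

import (
	"net/http"

	"github.com/docker/spdystream"
)

// openEcho opens a stream, waits for the remote reply, and sends one payload.
func openEcho(conn *spdystream.Connection) (*spdystream.Stream, error) {
	headers := http.Header{}
	headers.Set("x-purpose", "echo") // hypothetical header
	stream, err := conn.CreateStream(headers, nil, false)
	if err != nil {
		return nil, err
	}
	// CreateStream sends SYN_STREAM but does not wait for the SYN_REPLY.
	if err := stream.Wait(); err != nil {
		return nil, err
	}
	if _, err := stream.Write([]byte("hello")); err != nil {
		return nil, err
	}
	return stream, nil
}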
-
-func (s *Connection) shutdown(closeTimeout time.Duration) {
- // TODO Ensure this isn't called multiple times
- s.shutdownLock.Lock()
- if s.hasShutdown {
- s.shutdownLock.Unlock()
- return
- }
- s.hasShutdown = true
- s.shutdownLock.Unlock()
-
- var timeout <-chan time.Time
- if closeTimeout > time.Duration(0) {
- timeout = time.After(closeTimeout)
- }
- streamsClosed := make(chan bool)
-
- go func() {
- s.streamCond.L.Lock()
- for len(s.streams) > 0 {
- debugMessage("Streams opened: %d, %#v", len(s.streams), s.streams)
- s.streamCond.Wait()
- }
- s.streamCond.L.Unlock()
- close(streamsClosed)
- }()
-
- var err error
- select {
- case <-streamsClosed:
- // No active streams, close should be safe
- err = s.conn.Close()
- case <-timeout:
- // Force ungraceful close
- err = s.conn.Close()
- // Wait for cleanup to clear active streams
- <-streamsClosed
- }
-
- if err != nil {
- duration := 10 * time.Minute
- time.AfterFunc(duration, func() {
- select {
- case err, ok := <-s.shutdownChan:
- if ok {
- debugMessage("Unhandled close error after %s: %s", duration, err)
- }
- default:
- }
- })
- s.shutdownChan <- err
- }
- close(s.shutdownChan)
-
- return
-}
-
-// Close closes the spdy connection by sending a GoAway frame and initiating shutdown.
-func (s *Connection) Close() error {
- s.receiveIdLock.Lock()
- if s.goneAway {
- s.receiveIdLock.Unlock()
- return nil
- }
- s.goneAway = true
- s.receiveIdLock.Unlock()
-
- var lastStreamId spdy.StreamId
- if s.receivedStreamId > 2 {
- lastStreamId = s.receivedStreamId - 2
- }
-
- goAwayFrame := &spdy.GoAwayFrame{
- LastGoodStreamId: lastStreamId,
- Status: spdy.GoAwayOK,
- }
-
- err := s.framer.WriteFrame(goAwayFrame)
- if err != nil {
- return err
- }
-
- go s.shutdown(s.closeTimeout)
-
- return nil
-}
-
-// CloseWait closes the connection and waits for shutdown
-// to finish. Note the underlying network Connection
-// is not closed until the end of shutdown.
-func (s *Connection) CloseWait() error {
- closeErr := s.Close()
- if closeErr != nil {
- return closeErr
- }
- shutdownErr, ok := <-s.shutdownChan
- if ok {
- return shutdownErr
- }
- return nil
-}
-
-// Wait waits for the connection to finish shutdown or for
-// the wait timeout duration to expire. This needs to be
-// called either after Close has been called or the GOAWAY frame
-// has been received. If the wait timeout is 0, this function
-// will block until shutdown finishes. If wait is never called
-// and a shutdown error occurs, that error will be logged as an
-// unhandled error.
-func (s *Connection) Wait(waitTimeout time.Duration) error {
- var timeout <-chan time.Time
- if waitTimeout > time.Duration(0) {
- timeout = time.After(waitTimeout)
- }
-
- select {
- case err, ok := <-s.shutdownChan:
- if ok {
- return err
- }
- case <-timeout:
- return ErrTimeout
- }
- return nil
-}
-
-// NotifyClose registers a channel to be notified when the remote
-// peer indicates connection closure. The last stream to be
-// received by the remote will be sent on the channel. The notify
-// timeout will determine the duration between go away received
-// and the connection being closed.
-func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) {
- s.goAwayTimeout = timeout
- s.lastStreamChan = c
-}
-
-// SetCloseTimeout sets the amount of time close will wait for
-// streams to finish before terminating the underlying network
-// connection. Setting the timeout to 0 will cause close to
-// wait forever, which is the default.
-func (s *Connection) SetCloseTimeout(timeout time.Duration) {
- s.closeTimeout = timeout
-}
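A sketch of a graceful shutdown sequence combining SetCloseTimeout and CloseWait; the five-second bound is an arbitrary illustrative value:

package example

import (
	"time"

	"github.com/docker/spdystream"
)

// shutdown bounds how long Close waits for open streams, then sends GOAWAY
// and blocks until the connection has fully shut down.
func shutdown(conn *spdystream.Connection) error {
	conn.SetCloseTimeout(5 * time.Second)
	return conn.CloseWait()
}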
-
-// SetIdleTimeout sets the amount of time the connection may sit idle before
-// it is forcefully terminated.
-func (s *Connection) SetIdleTimeout(timeout time.Duration) {
- s.framer.setIdleTimeout(timeout)
-}
-
-func (s *Connection) sendHeaders(headers http.Header, stream *Stream, fin bool) error {
- var flags spdy.ControlFlags
- if fin {
- flags = spdy.ControlFlagFin
- }
-
- headerFrame := &spdy.HeadersFrame{
- StreamId: stream.streamId,
- Headers: headers,
- CFHeader: spdy.ControlFrameHeader{Flags: flags},
- }
-
- return s.framer.WriteFrame(headerFrame)
-}
-
-func (s *Connection) sendReply(headers http.Header, stream *Stream, fin bool) error {
- var flags spdy.ControlFlags
- if fin {
- flags = spdy.ControlFlagFin
- }
-
- replyFrame := &spdy.SynReplyFrame{
- StreamId: stream.streamId,
- Headers: headers,
- CFHeader: spdy.ControlFrameHeader{Flags: flags},
- }
-
- return s.framer.WriteFrame(replyFrame)
-}
-
-func (s *Connection) sendResetFrame(status spdy.RstStreamStatus, streamId spdy.StreamId) error {
- resetFrame := &spdy.RstStreamFrame{
- StreamId: streamId,
- Status: status,
- }
-
- return s.framer.WriteFrame(resetFrame)
-}
-
-func (s *Connection) sendReset(status spdy.RstStreamStatus, stream *Stream) error {
- return s.sendResetFrame(status, stream.streamId)
-}
-
-func (s *Connection) sendStream(stream *Stream, fin bool) error {
- var flags spdy.ControlFlags
- if fin {
- flags = spdy.ControlFlagFin
- stream.finished = true
- }
-
- var parentId spdy.StreamId
- if stream.parent != nil {
- parentId = stream.parent.streamId
- }
-
- streamFrame := &spdy.SynStreamFrame{
- StreamId: spdy.StreamId(stream.streamId),
- AssociatedToStreamId: spdy.StreamId(parentId),
- Headers: stream.headers,
- CFHeader: spdy.ControlFrameHeader{Flags: flags},
- }
-
- return s.framer.WriteFrame(streamFrame)
-}
-
-// getNextStreamId returns the next sequential id;
-// every call should produce a unique value or 0 once ids are exhausted
-func (s *Connection) getNextStreamId() spdy.StreamId {
- sid := s.nextStreamId
- if sid > 0x7fffffff {
- return 0
- }
- s.nextStreamId = s.nextStreamId + 2
- return sid
-}
-
-// PeekNextStreamId returns the next sequential id and keeps the next id untouched
-func (s *Connection) PeekNextStreamId() spdy.StreamId {
- sid := s.nextStreamId
- return sid
-}
-
-func (s *Connection) validateStreamId(rid spdy.StreamId) error {
- if rid > 0x7fffffff || rid < s.receivedStreamId {
- return ErrInvalidStreamId
- }
- s.receivedStreamId = rid + 2
- return nil
-}
-
-func (s *Connection) addStream(stream *Stream) {
- s.streamCond.L.Lock()
- s.streams[stream.streamId] = stream
- debugMessage("(%p) (%p) Stream added, broadcasting: %d", s, stream, stream.streamId)
- s.streamCond.Broadcast()
- s.streamCond.L.Unlock()
-}
-
-func (s *Connection) removeStream(stream *Stream) {
- s.streamCond.L.Lock()
- delete(s.streams, stream.streamId)
- debugMessage("(%p) (%p) Stream removed, broadcasting: %d", s, stream, stream.streamId)
- s.streamCond.Broadcast()
- s.streamCond.L.Unlock()
-}
-
-func (s *Connection) getStream(streamId spdy.StreamId) (stream *Stream, ok bool) {
- s.streamLock.RLock()
- stream, ok = s.streams[streamId]
- s.streamLock.RUnlock()
- return
-}
-
-// FindStream looks up the given stream id and either waits for the
-// stream to be found or returns nil if the stream id is no longer
-// valid.
-func (s *Connection) FindStream(streamId uint32) *Stream {
- var stream *Stream
- var ok bool
- s.streamCond.L.Lock()
- stream, ok = s.streams[spdy.StreamId(streamId)]
- debugMessage("(%p) Found stream %d? %t", s, spdy.StreamId(streamId), ok)
- for !ok && streamId >= uint32(s.receivedStreamId) {
- s.streamCond.Wait()
- stream, ok = s.streams[spdy.StreamId(streamId)]
- }
- s.streamCond.L.Unlock()
- return stream
-}
-
-func (s *Connection) CloseChan() <-chan bool {
- return s.closeChan
-}
diff --git a/vendor/github.com/docker/spdystream/handlers.go b/vendor/github.com/docker/spdystream/handlers.go
deleted file mode 100644
index b59fa5fdc..000000000
--- a/vendor/github.com/docker/spdystream/handlers.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package spdystream
-
-import (
- "io"
- "net/http"
-)
-
-// MirrorStreamHandler mirrors all streams.
-func MirrorStreamHandler(stream *Stream) {
- replyErr := stream.SendReply(http.Header{}, false)
- if replyErr != nil {
- return
- }
-
- go func() {
- io.Copy(stream, stream)
- stream.Close()
- }()
- go func() {
- for {
- header, receiveErr := stream.ReceiveHeader()
- if receiveErr != nil {
- return
- }
- sendErr := stream.SendHeader(header, false)
- if sendErr != nil {
- return
- }
- }
- }()
-}
-
-// NoOpStreamHandler does nothing when a stream connects; it is most
-// likely used with RejectAuthHandler, which will not allow any
-// streams to make it to the stream handler.
-func NoOpStreamHandler(stream *Stream) {
- stream.SendReply(http.Header{}, false)
-}
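A server-side sketch pairing Serve with MirrorStreamHandler; the listen address is hypothetical and spdystream.NewConnection is assumed to be the package's constructor for accepted connections:

package main

import (
	"log"
	"net"

	"github.com/docker/spdystream"
)

func main() {
	listener, err := net.Listen("tcp", "127.0.0.1:7000") // hypothetical address
	if err != nil {
		log.Fatal(err)
	}
	for {
		nc, err := listener.Accept()
		if err != nil {
			log.Fatal(err)
		}
		conn, err := spdystream.NewConnection(nc, true) // assumed constructor; true = server side
		if err != nil {
			log.Print(err)
			continue
		}
		// MirrorStreamHandler replies to each stream and echoes its data and headers.
		go conn.Serve(spdystream.MirrorStreamHandler)
	}
}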
diff --git a/vendor/github.com/docker/spdystream/priority.go b/vendor/github.com/docker/spdystream/priority.go
deleted file mode 100644
index fc8582b5c..000000000
--- a/vendor/github.com/docker/spdystream/priority.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package spdystream
-
-import (
- "container/heap"
- "sync"
-
- "github.com/docker/spdystream/spdy"
-)
-
-type prioritizedFrame struct {
- frame spdy.Frame
- priority uint8
- insertId uint64
-}
-
-type frameQueue []*prioritizedFrame
-
-func (fq frameQueue) Len() int {
- return len(fq)
-}
-
-func (fq frameQueue) Less(i, j int) bool {
- if fq[i].priority == fq[j].priority {
- return fq[i].insertId < fq[j].insertId
- }
- return fq[i].priority < fq[j].priority
-}
-
-func (fq frameQueue) Swap(i, j int) {
- fq[i], fq[j] = fq[j], fq[i]
-}
-
-func (fq *frameQueue) Push(x interface{}) {
- *fq = append(*fq, x.(*prioritizedFrame))
-}
-
-func (fq *frameQueue) Pop() interface{} {
- old := *fq
- n := len(old)
- *fq = old[0 : n-1]
- return old[n-1]
-}
-
-type PriorityFrameQueue struct {
- queue *frameQueue
- c *sync.Cond
- size int
- nextInsertId uint64
- drain bool
-}
-
-func NewPriorityFrameQueue(size int) *PriorityFrameQueue {
- queue := make(frameQueue, 0, size)
- heap.Init(&queue)
-
- return &PriorityFrameQueue{
- queue: &queue,
- size: size,
- c: sync.NewCond(&sync.Mutex{}),
- }
-}
-
-func (q *PriorityFrameQueue) Push(frame spdy.Frame, priority uint8) {
- q.c.L.Lock()
- defer q.c.L.Unlock()
- for q.queue.Len() >= q.size {
- q.c.Wait()
- }
- pFrame := &prioritizedFrame{
- frame: frame,
- priority: priority,
- insertId: q.nextInsertId,
- }
- q.nextInsertId = q.nextInsertId + 1
- heap.Push(q.queue, pFrame)
- q.c.Signal()
-}
-
-func (q *PriorityFrameQueue) Pop() spdy.Frame {
- q.c.L.Lock()
- defer q.c.L.Unlock()
- for q.queue.Len() == 0 {
- if q.drain {
- return nil
- }
- q.c.Wait()
- }
- frame := heap.Pop(q.queue).(*prioritizedFrame).frame
- q.c.Signal()
- return frame
-}
-
-func (q *PriorityFrameQueue) Drain() {
- q.c.L.Lock()
- defer q.c.L.Unlock()
- q.drain = true
- q.c.Broadcast()
-}
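A small sketch of PriorityFrameQueue ordering: lower priority values pop first, and Drain unblocks Pop with nil once the queue is empty.

package example

import (
	"fmt"

	"github.com/docker/spdystream"
	"github.com/docker/spdystream/spdy"
)

func queueDemo() {
	q := spdystream.NewPriorityFrameQueue(4)
	q.Push(&spdy.PingFrame{Id: 1}, 7) // low urgency
	q.Push(&spdy.PingFrame{Id: 3}, 0) // high urgency

	first := q.Pop().(*spdy.PingFrame)
	fmt.Println(first.Id) // 3: priority 0 is popped before priority 7
	second := q.Pop().(*spdy.PingFrame)
	fmt.Println(second.Id) // 1

	q.Drain()
	fmt.Println(q.Pop() == nil) // true: Pop returns nil once drained and empty
}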
diff --git a/vendor/github.com/docker/spdystream/spdy/dictionary.go b/vendor/github.com/docker/spdystream/spdy/dictionary.go
deleted file mode 100644
index 5a5ff0e14..000000000
--- a/vendor/github.com/docker/spdystream/spdy/dictionary.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package spdy
-
-// headerDictionary is the dictionary sent to the zlib compressor/decompressor.
-var headerDictionary = []byte{
- 0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68,
- 0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70,
- 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70,
- 0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65,
- 0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05,
- 0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00,
- 0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00,
- 0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70,
- 0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65,
- 0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63,
- 0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f,
- 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f,
- 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c,
- 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00,
- 0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70,
- 0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73,
- 0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00,
- 0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
- 0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68,
- 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63,
- 0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f,
- 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74,
- 0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65,
- 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74,
- 0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f,
- 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10,
- 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d,
- 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
- 0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74,
- 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67,
- 0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f,
- 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00,
- 0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
- 0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00,
- 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
- 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00,
- 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
- 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00,
- 0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00,
- 0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00,
- 0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74,
- 0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69,
- 0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66,
- 0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68,
- 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69,
- 0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00,
- 0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f,
- 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73,
- 0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d,
- 0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d,
- 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00,
- 0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67,
- 0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d,
- 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69,
- 0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65,
- 0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74,
- 0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65,
- 0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00,
- 0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72,
- 0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00,
- 0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00,
- 0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79,
- 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74,
- 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00,
- 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61,
- 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05,
- 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00,
- 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72,
- 0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72,
- 0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00,
- 0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65,
- 0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00,
- 0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c,
- 0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72,
- 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65,
- 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00,
- 0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61,
- 0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73,
- 0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74,
- 0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79,
- 0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00,
- 0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69,
- 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77,
- 0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e,
- 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00,
- 0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
- 0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00,
- 0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30,
- 0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76,
- 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00,
- 0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31,
- 0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72,
- 0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62,
- 0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73,
- 0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69,
- 0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65,
- 0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00,
- 0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69,
- 0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32,
- 0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35,
- 0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30,
- 0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33,
- 0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37,
- 0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30,
- 0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34,
- 0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31,
- 0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31,
- 0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34,
- 0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34,
- 0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e,
- 0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f,
- 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65,
- 0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20,
- 0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65,
- 0x6e, 0x74, 0x33, 0x30, 0x31, 0x20, 0x4d, 0x6f,
- 0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d,
- 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34,
- 0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30,
- 0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68,
- 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30,
- 0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64,
- 0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e,
- 0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64,
- 0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65,
- 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72,
- 0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f,
- 0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74,
- 0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65,
- 0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20,
- 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20,
- 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61,
- 0x62, 0x6c, 0x65, 0x4a, 0x61, 0x6e, 0x20, 0x46,
- 0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41,
- 0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a,
- 0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41,
- 0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20,
- 0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20,
- 0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30,
- 0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e,
- 0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57,
- 0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c,
- 0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61,
- 0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20,
- 0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b,
- 0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f,
- 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61,
- 0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69,
- 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67,
- 0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67,
- 0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78,
- 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78,
- 0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c,
- 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c,
- 0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74,
- 0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c,
- 0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74,
- 0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65,
- 0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65,
- 0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64,
- 0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65,
- 0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63,
- 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69,
- 0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d,
- 0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a,
- 0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e,
-}
diff --git a/vendor/github.com/docker/spdystream/spdy/read.go b/vendor/github.com/docker/spdystream/spdy/read.go
deleted file mode 100644
index 9359a9501..000000000
--- a/vendor/github.com/docker/spdystream/spdy/read.go
+++ /dev/null
@@ -1,348 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package spdy
-
-import (
- "compress/zlib"
- "encoding/binary"
- "io"
- "net/http"
- "strings"
-)
-
-func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error {
- return f.readSynStreamFrame(h, frame)
-}
-
-func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error {
- return f.readSynReplyFrame(h, frame)
-}
-
-func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error {
- frame.CFHeader = h
- if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
- return err
- }
- if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
- return err
- }
- if frame.Status == 0 {
- return &Error{InvalidControlFrame, frame.StreamId}
- }
- if frame.StreamId == 0 {
- return &Error{ZeroStreamId, 0}
- }
- return nil
-}
-
-func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error {
- frame.CFHeader = h
- var numSettings uint32
- if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil {
- return err
- }
- frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings)
- for i := uint32(0); i < numSettings; i++ {
- if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil {
- return err
- }
- frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24)
- frame.FlagIdValues[i].Id &= 0xffffff
- if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error {
- frame.CFHeader = h
- if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil {
- return err
- }
- if frame.Id == 0 {
- return &Error{ZeroStreamId, 0}
- }
- if frame.CFHeader.Flags != 0 {
- return &Error{InvalidControlFrame, StreamId(frame.Id)}
- }
- return nil
-}
-
-func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) error {
- frame.CFHeader = h
- if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil {
- return err
- }
- if frame.CFHeader.Flags != 0 {
- return &Error{InvalidControlFrame, frame.LastGoodStreamId}
- }
- if frame.CFHeader.length != 8 {
- return &Error{InvalidControlFrame, frame.LastGoodStreamId}
- }
- if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
- return err
- }
- return nil
-}
-
-func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error {
- return f.readHeadersFrame(h, frame)
-}
-
-func (frame *WindowUpdateFrame) read(h ControlFrameHeader, f *Framer) error {
- frame.CFHeader = h
- if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
- return err
- }
- if frame.CFHeader.Flags != 0 {
- return &Error{InvalidControlFrame, frame.StreamId}
- }
- if frame.CFHeader.length != 8 {
- return &Error{InvalidControlFrame, frame.StreamId}
- }
- if err := binary.Read(f.r, binary.BigEndian, &frame.DeltaWindowSize); err != nil {
- return err
- }
- return nil
-}
-
-func newControlFrame(frameType ControlFrameType) (controlFrame, error) {
- ctor, ok := cframeCtor[frameType]
- if !ok {
- return nil, &Error{Err: InvalidControlFrame}
- }
- return ctor(), nil
-}
-
-var cframeCtor = map[ControlFrameType]func() controlFrame{
- TypeSynStream: func() controlFrame { return new(SynStreamFrame) },
- TypeSynReply: func() controlFrame { return new(SynReplyFrame) },
- TypeRstStream: func() controlFrame { return new(RstStreamFrame) },
- TypeSettings: func() controlFrame { return new(SettingsFrame) },
- TypePing: func() controlFrame { return new(PingFrame) },
- TypeGoAway: func() controlFrame { return new(GoAwayFrame) },
- TypeHeaders: func() controlFrame { return new(HeadersFrame) },
- TypeWindowUpdate: func() controlFrame { return new(WindowUpdateFrame) },
-}
-
-func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error {
- if f.headerDecompressor != nil {
- f.headerReader.N = payloadSize
- return nil
- }
- f.headerReader = io.LimitedReader{R: f.r, N: payloadSize}
- decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(headerDictionary))
- if err != nil {
- return err
- }
- f.headerDecompressor = decompressor
- return nil
-}
-
-// ReadFrame reads SPDY encoded data and returns a decompressed Frame.
-func (f *Framer) ReadFrame() (Frame, error) {
- var firstWord uint32
- if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil {
- return nil, err
- }
- if firstWord&0x80000000 != 0 {
- frameType := ControlFrameType(firstWord & 0xffff)
- version := uint16(firstWord >> 16 & 0x7fff)
- return f.parseControlFrame(version, frameType)
- }
- return f.parseDataFrame(StreamId(firstWord & 0x7fffffff))
-}
-
-func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) {
- var length uint32
- if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
- return nil, err
- }
- flags := ControlFlags((length & 0xff000000) >> 24)
- length &= 0xffffff
- header := ControlFrameHeader{version, frameType, flags, length}
- cframe, err := newControlFrame(frameType)
- if err != nil {
- return nil, err
- }
- if err = cframe.read(header, f); err != nil {
- return nil, err
- }
- return cframe, nil
-}
-
-func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) {
- var numHeaders uint32
- if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil {
- return nil, err
- }
- var e error
- h := make(http.Header, int(numHeaders))
- for i := 0; i < int(numHeaders); i++ {
- var length uint32
- if err := binary.Read(r, binary.BigEndian, &length); err != nil {
- return nil, err
- }
- nameBytes := make([]byte, length)
- if _, err := io.ReadFull(r, nameBytes); err != nil {
- return nil, err
- }
- name := string(nameBytes)
- if name != strings.ToLower(name) {
- e = &Error{UnlowercasedHeaderName, streamId}
- name = strings.ToLower(name)
- }
- if h[name] != nil {
- e = &Error{DuplicateHeaders, streamId}
- }
- if err := binary.Read(r, binary.BigEndian, &length); err != nil {
- return nil, err
- }
- value := make([]byte, length)
- if _, err := io.ReadFull(r, value); err != nil {
- return nil, err
- }
- valueList := strings.Split(string(value), headerValueSeparator)
- for _, v := range valueList {
- h.Add(name, v)
- }
- }
- if e != nil {
- return h, e
- }
- return h, nil
-}
-
-func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error {
- frame.CFHeader = h
- var err error
- if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
- return err
- }
- if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil {
- return err
- }
- if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil {
- return err
- }
- frame.Priority >>= 5
- if err = binary.Read(f.r, binary.BigEndian, &frame.Slot); err != nil {
- return err
- }
- reader := f.r
- if !f.headerCompressionDisabled {
- err := f.uncorkHeaderDecompressor(int64(h.length - 10))
- if err != nil {
- return err
- }
- reader = f.headerDecompressor
- }
- frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
- if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
- err = &Error{WrongCompressedPayloadSize, 0}
- }
- if err != nil {
- return err
- }
- for h := range frame.Headers {
- if invalidReqHeaders[h] {
- return &Error{InvalidHeaderPresent, frame.StreamId}
- }
- }
- if frame.StreamId == 0 {
- return &Error{ZeroStreamId, 0}
- }
- return nil
-}
-
-func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error {
- frame.CFHeader = h
- var err error
- if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
- return err
- }
- reader := f.r
- if !f.headerCompressionDisabled {
- err := f.uncorkHeaderDecompressor(int64(h.length - 4))
- if err != nil {
- return err
- }
- reader = f.headerDecompressor
- }
- frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
- if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
- err = &Error{WrongCompressedPayloadSize, 0}
- }
- if err != nil {
- return err
- }
- for h := range frame.Headers {
- if invalidRespHeaders[h] {
- return &Error{InvalidHeaderPresent, frame.StreamId}
- }
- }
- if frame.StreamId == 0 {
- return &Error{ZeroStreamId, 0}
- }
- return nil
-}
-
-func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error {
- frame.CFHeader = h
- var err error
- if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
- return err
- }
- reader := f.r
- if !f.headerCompressionDisabled {
- err := f.uncorkHeaderDecompressor(int64(h.length - 4))
- if err != nil {
- return err
- }
- reader = f.headerDecompressor
- }
- frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
- if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
- err = &Error{WrongCompressedPayloadSize, 0}
- }
- if err != nil {
- return err
- }
- var invalidHeaders map[string]bool
- if frame.StreamId%2 == 0 {
- invalidHeaders = invalidReqHeaders
- } else {
- invalidHeaders = invalidRespHeaders
- }
- for h := range frame.Headers {
- if invalidHeaders[h] {
- return &Error{InvalidHeaderPresent, frame.StreamId}
- }
- }
- if frame.StreamId == 0 {
- return &Error{ZeroStreamId, 0}
- }
- return nil
-}
-
-func (f *Framer) parseDataFrame(streamId StreamId) (*DataFrame, error) {
- var length uint32
- if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
- return nil, err
- }
- var frame DataFrame
- frame.StreamId = streamId
- frame.Flags = DataFlags(length >> 24)
- length &= 0xffffff
- frame.Data = make([]byte, length)
- if _, err := io.ReadFull(f.r, frame.Data); err != nil {
- return nil, err
- }
- if frame.StreamId == 0 {
- return nil, &Error{ZeroStreamId, 0}
- }
- return &frame, nil
-}
diff --git a/vendor/github.com/docker/spdystream/spdy/types.go b/vendor/github.com/docker/spdystream/spdy/types.go
deleted file mode 100644
index 7b6ee9c6f..000000000
--- a/vendor/github.com/docker/spdystream/spdy/types.go
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package spdy implements the SPDY protocol (currently SPDY/3), described in
-// http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3.
-package spdy
-
-import (
- "bytes"
- "compress/zlib"
- "io"
- "net/http"
-)
-
-// Version is the protocol version number that this package implements.
-const Version = 3
-
-// ControlFrameType stores the type field in a control frame header.
-type ControlFrameType uint16
-
-const (
- TypeSynStream ControlFrameType = 0x0001
- TypeSynReply = 0x0002
- TypeRstStream = 0x0003
- TypeSettings = 0x0004
- TypePing = 0x0006
- TypeGoAway = 0x0007
- TypeHeaders = 0x0008
- TypeWindowUpdate = 0x0009
-)
-
-// ControlFlags are the flags that can be set on a control frame.
-type ControlFlags uint8
-
-const (
- ControlFlagFin ControlFlags = 0x01
- ControlFlagUnidirectional = 0x02
- ControlFlagSettingsClearSettings = 0x01
-)
-
-// DataFlags are the flags that can be set on a data frame.
-type DataFlags uint8
-
-const (
- DataFlagFin DataFlags = 0x01
-)
-
-// MaxDataLength is the maximum number of bytes that can be stored in one frame.
-const MaxDataLength = 1<<24 - 1
-
-// headerValueSeparator separates multiple header values.
-const headerValueSeparator = "\x00"
-
-// Frame is a single SPDY frame in its unpacked in-memory representation. Use
-// Framer to read and write it.
-type Frame interface {
- write(f *Framer) error
-}
-
-// ControlFrameHeader contains all the fields in a control frame header,
-// in its unpacked in-memory representation.
-type ControlFrameHeader struct {
- // Note, high bit is the "Control" bit.
- version uint16 // spdy version number
- frameType ControlFrameType
- Flags ControlFlags
- length uint32 // length of data field
-}
-
-type controlFrame interface {
- Frame
- read(h ControlFrameHeader, f *Framer) error
-}
-
-// StreamId represents a 31-bit value identifying the stream.
-type StreamId uint32
-
-// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM
-// frame.
-type SynStreamFrame struct {
- CFHeader ControlFrameHeader
- StreamId StreamId
- AssociatedToStreamId StreamId // stream id for a stream which this stream is associated to
- Priority uint8 // priority of this frame (3-bit)
- Slot uint8 // index in the server's credential vector of the client certificate
- Headers http.Header
-}
-
-// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame.
-type SynReplyFrame struct {
- CFHeader ControlFrameHeader
- StreamId StreamId
- Headers http.Header
-}
-
-// RstStreamStatus represents the status that led to a RST_STREAM.
-type RstStreamStatus uint32
-
-const (
- ProtocolError RstStreamStatus = iota + 1
- InvalidStream
- RefusedStream
- UnsupportedVersion
- Cancel
- InternalError
- FlowControlError
- StreamInUse
- StreamAlreadyClosed
- InvalidCredentials
- FrameTooLarge
-)
-
-// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM
-// frame.
-type RstStreamFrame struct {
- CFHeader ControlFrameHeader
- StreamId StreamId
- Status RstStreamStatus
-}
-
-// SettingsFlag represents a flag in a SETTINGS frame.
-type SettingsFlag uint8
-
-const (
- FlagSettingsPersistValue SettingsFlag = 0x1
- FlagSettingsPersisted = 0x2
-)
-
-// SettingsId represents the id of an id/value pair in a SETTINGS frame.
-type SettingsId uint32
-
-const (
- SettingsUploadBandwidth SettingsId = iota + 1
- SettingsDownloadBandwidth
- SettingsRoundTripTime
- SettingsMaxConcurrentStreams
- SettingsCurrentCwnd
- SettingsDownloadRetransRate
- SettingsInitialWindowSize
- SettingsClientCretificateVectorSize
-)
-
-// SettingsFlagIdValue is the unpacked, in-memory representation of the
-// combined flag/id/value for a setting in a SETTINGS frame.
-type SettingsFlagIdValue struct {
- Flag SettingsFlag
- Id SettingsId
- Value uint32
-}
-
-// SettingsFrame is the unpacked, in-memory representation of a SPDY
-// SETTINGS frame.
-type SettingsFrame struct {
- CFHeader ControlFrameHeader
- FlagIdValues []SettingsFlagIdValue
-}
-
-// PingFrame is the unpacked, in-memory representation of a PING frame.
-type PingFrame struct {
- CFHeader ControlFrameHeader
- Id uint32 // unique id for this ping; server-initiated pings are even, client-initiated are odd.
-}
-
-// GoAwayStatus represents the status in a GoAwayFrame.
-type GoAwayStatus uint32
-
-const (
- GoAwayOK GoAwayStatus = iota
- GoAwayProtocolError
- GoAwayInternalError
-)
-
-// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame.
-type GoAwayFrame struct {
- CFHeader ControlFrameHeader
- LastGoodStreamId StreamId // last stream id which was accepted by sender
- Status GoAwayStatus
-}
-
-// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame.
-type HeadersFrame struct {
- CFHeader ControlFrameHeader
- StreamId StreamId
- Headers http.Header
-}
-
-// WindowUpdateFrame is the unpacked, in-memory representation of a
-// WINDOW_UPDATE frame.
-type WindowUpdateFrame struct {
- CFHeader ControlFrameHeader
- StreamId StreamId
- DeltaWindowSize uint32 // additional number of bytes to existing window size
-}
-
-// TODO: Implement credential frame and related methods.
-
-// DataFrame is the unpacked, in-memory representation of a DATA frame.
-type DataFrame struct {
- // Note, high bit is the "Control" bit. Should be 0 for data frames.
- StreamId StreamId
- Flags DataFlags
- Data []byte // payload data of this frame
-}
-
-// ErrorCode is a SPDY-specific error code.
-type ErrorCode string
-
-const (
- UnlowercasedHeaderName ErrorCode = "header was not lowercased"
- DuplicateHeaders = "multiple headers with same name"
- WrongCompressedPayloadSize = "compressed payload size was incorrect"
- UnknownFrameType = "unknown frame type"
- InvalidControlFrame = "invalid control frame"
- InvalidDataFrame = "invalid data frame"
- InvalidHeaderPresent = "frame contained invalid header"
- ZeroStreamId = "stream id zero is disallowed"
-)
-
-// Error contains both the type of error and additional values. StreamId is 0
-// if Error is not associated with a stream.
-type Error struct {
- Err ErrorCode
- StreamId StreamId
-}
-
-func (e *Error) Error() string {
- return string(e.Err)
-}
-
-var invalidReqHeaders = map[string]bool{
- "Connection": true,
- "Host": true,
- "Keep-Alive": true,
- "Proxy-Connection": true,
- "Transfer-Encoding": true,
-}
-
-var invalidRespHeaders = map[string]bool{
- "Connection": true,
- "Keep-Alive": true,
- "Proxy-Connection": true,
- "Transfer-Encoding": true,
-}
-
-// Framer handles serializing/deserializing SPDY frames, including compressing/
-// decompressing payloads.
-type Framer struct {
- headerCompressionDisabled bool
- w io.Writer
- headerBuf *bytes.Buffer
- headerCompressor *zlib.Writer
- r io.Reader
- headerReader io.LimitedReader
- headerDecompressor io.ReadCloser
-}
-
-// NewFramer allocates a new Framer for a given SPDY connection, represented by
-// an io.Writer and io.Reader. Note that Framer will read and write individual fields
-// from/to the Reader and Writer, so the caller should pass in an appropriately
-// buffered implementation to optimize performance.
-func NewFramer(w io.Writer, r io.Reader) (*Framer, error) {
- compressBuf := new(bytes.Buffer)
- compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary))
- if err != nil {
- return nil, err
- }
- framer := &Framer{
- w: w,
- headerBuf: compressBuf,
- headerCompressor: compressor,
- r: r,
- }
- return framer, nil
-}
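A sketch of a Framer round trip over an in-memory buffer; a PING frame avoids the header-compression path, so a single bytes.Buffer can act as both writer and reader:

package example

import (
	"bytes"
	"fmt"

	"github.com/docker/spdystream/spdy"
)

func framerRoundTrip() error {
	buf := new(bytes.Buffer)
	framer, err := spdy.NewFramer(buf, buf)
	if err != nil {
		return err
	}
	// Write a PING control frame, then read it back from the same buffer.
	if err := framer.WriteFrame(&spdy.PingFrame{Id: 1}); err != nil {
		return err
	}
	frame, err := framer.ReadFrame()
	if err != nil {
		return err
	}
	ping, ok := frame.(*spdy.PingFrame)
	if !ok {
		return fmt.Errorf("unexpected frame type %T", frame)
	}
	fmt.Println(ping.Id) // 1
	return nil
}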
diff --git a/vendor/github.com/docker/spdystream/spdy/write.go b/vendor/github.com/docker/spdystream/spdy/write.go
deleted file mode 100644
index b212f66a2..000000000
--- a/vendor/github.com/docker/spdystream/spdy/write.go
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package spdy
-
-import (
- "encoding/binary"
- "io"
- "net/http"
- "strings"
-)
-
-func (frame *SynStreamFrame) write(f *Framer) error {
- return f.writeSynStreamFrame(frame)
-}
-
-func (frame *SynReplyFrame) write(f *Framer) error {
- return f.writeSynReplyFrame(frame)
-}
-
-func (frame *RstStreamFrame) write(f *Framer) (err error) {
- if frame.StreamId == 0 {
- return &Error{ZeroStreamId, 0}
- }
- frame.CFHeader.version = Version
- frame.CFHeader.frameType = TypeRstStream
- frame.CFHeader.Flags = 0
- frame.CFHeader.length = 8
-
- // Serialize frame to Writer.
- if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
- return
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
- return
- }
- if frame.Status == 0 {
- return &Error{InvalidControlFrame, frame.StreamId}
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil {
- return
- }
- return
-}
-
-func (frame *SettingsFrame) write(f *Framer) (err error) {
- frame.CFHeader.version = Version
- frame.CFHeader.frameType = TypeSettings
- frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4)
-
- // Serialize frame to Writer.
- if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
- return
- }
- if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil {
- return
- }
- for _, flagIdValue := range frame.FlagIdValues {
- flagId := uint32(flagIdValue.Flag)<<24 | uint32(flagIdValue.Id)
- if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil {
- return
- }
- if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil {
- return
- }
- }
- return
-}
-
-func (frame *PingFrame) write(f *Framer) (err error) {
- if frame.Id == 0 {
- return &Error{ZeroStreamId, 0}
- }
- frame.CFHeader.version = Version
- frame.CFHeader.frameType = TypePing
- frame.CFHeader.Flags = 0
- frame.CFHeader.length = 4
-
- // Serialize frame to Writer.
- if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
- return
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil {
- return
- }
- return
-}
-
-func (frame *GoAwayFrame) write(f *Framer) (err error) {
- frame.CFHeader.version = Version
- frame.CFHeader.frameType = TypeGoAway
- frame.CFHeader.Flags = 0
- frame.CFHeader.length = 8
-
- // Serialize frame to Writer.
- if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
- return
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil {
- return
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil {
- return
- }
- return nil
-}
-
-func (frame *HeadersFrame) write(f *Framer) error {
- return f.writeHeadersFrame(frame)
-}
-
-func (frame *WindowUpdateFrame) write(f *Framer) (err error) {
- frame.CFHeader.version = Version
- frame.CFHeader.frameType = TypeWindowUpdate
- frame.CFHeader.Flags = 0
- frame.CFHeader.length = 8
-
- // Serialize frame to Writer.
- if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
- return
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
- return
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.DeltaWindowSize); err != nil {
- return
- }
- return nil
-}
-
-func (frame *DataFrame) write(f *Framer) error {
- return f.writeDataFrame(frame)
-}
-
-// WriteFrame writes a frame.
-func (f *Framer) WriteFrame(frame Frame) error {
- return frame.write(f)
-}
-
-func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error {
- if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil {
- return err
- }
- if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil {
- return err
- }
- flagsAndLength := uint32(h.Flags)<<24 | h.length
- if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil {
- return err
- }
- return nil
-}
-
-func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) {
- n = 0
- if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil {
- return
- }
- n += 2
- for name, values := range h {
- if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil {
- return
- }
- n += 2
- name = strings.ToLower(name)
- if _, err = io.WriteString(w, name); err != nil {
- return
- }
- n += len(name)
- v := strings.Join(values, headerValueSeparator)
- if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil {
- return
- }
- n += 2
- if _, err = io.WriteString(w, v); err != nil {
- return
- }
- n += len(v)
- }
- return
-}
-
-func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) {
- if frame.StreamId == 0 {
- return &Error{ZeroStreamId, 0}
- }
- // Marshal the headers.
- var writer io.Writer = f.headerBuf
- if !f.headerCompressionDisabled {
- writer = f.headerCompressor
- }
- if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
- return
- }
- if !f.headerCompressionDisabled {
- f.headerCompressor.Flush()
- }
-
- // Set ControlFrameHeader.
- frame.CFHeader.version = Version
- frame.CFHeader.frameType = TypeSynStream
- frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10)
-
- // Serialize frame to Writer.
- if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
- return err
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
- return err
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil {
- return err
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<5); err != nil {
- return err
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.Slot); err != nil {
- return err
- }
- if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
- return err
- }
- f.headerBuf.Reset()
- return nil
-}
-
-func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) {
- if frame.StreamId == 0 {
- return &Error{ZeroStreamId, 0}
- }
- // Marshal the headers.
- var writer io.Writer = f.headerBuf
- if !f.headerCompressionDisabled {
- writer = f.headerCompressor
- }
- if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
- return
- }
- if !f.headerCompressionDisabled {
- f.headerCompressor.Flush()
- }
-
- // Set ControlFrameHeader.
- frame.CFHeader.version = Version
- frame.CFHeader.frameType = TypeSynReply
- frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4)
-
- // Serialize frame to Writer.
- if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
- return
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
- return
- }
- if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
- return
- }
- f.headerBuf.Reset()
- return
-}
-
-func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) {
- if frame.StreamId == 0 {
- return &Error{ZeroStreamId, 0}
- }
- // Marshal the headers.
- var writer io.Writer = f.headerBuf
- if !f.headerCompressionDisabled {
- writer = f.headerCompressor
- }
- if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
- return
- }
- if !f.headerCompressionDisabled {
- f.headerCompressor.Flush()
- }
-
- // Set ControlFrameHeader.
- frame.CFHeader.version = Version
- frame.CFHeader.frameType = TypeHeaders
- frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4)
-
- // Serialize frame to Writer.
- if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
- return
- }
- if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
- return
- }
- if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
- return
- }
- f.headerBuf.Reset()
- return
-}
-
-func (f *Framer) writeDataFrame(frame *DataFrame) (err error) {
- if frame.StreamId == 0 {
- return &Error{ZeroStreamId, 0}
- }
- if frame.StreamId&0x80000000 != 0 || len(frame.Data) > MaxDataLength {
- return &Error{InvalidDataFrame, frame.StreamId}
- }
-
- // Serialize frame to Writer.
- if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
- return
- }
- flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data))
- if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil {
- return
- }
- if _, err = f.w.Write(frame.Data); err != nil {
- return
- }
- return nil
-}
diff --git a/vendor/github.com/docker/spdystream/stream.go b/vendor/github.com/docker/spdystream/stream.go
deleted file mode 100644
index f9e9ee267..000000000
--- a/vendor/github.com/docker/spdystream/stream.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package spdystream
-
-import (
- "errors"
- "fmt"
- "io"
- "net"
- "net/http"
- "sync"
- "time"
-
- "github.com/docker/spdystream/spdy"
-)
-
-var (
- ErrUnreadPartialData = errors.New("unread partial data")
-)
-
-type Stream struct {
- streamId spdy.StreamId
- parent *Stream
- conn *Connection
- startChan chan error
-
- dataLock sync.RWMutex
- dataChan chan []byte
- unread []byte
-
- priority uint8
- headers http.Header
- headerChan chan http.Header
- finishLock sync.Mutex
- finished bool
- replyCond *sync.Cond
- replied bool
- closeLock sync.Mutex
- closeChan chan bool
-}
-
-// WriteData writes data to the stream, sending a data frame per call.
-func (s *Stream) WriteData(data []byte, fin bool) error {
- s.waitWriteReply()
- var flags spdy.DataFlags
-
- if fin {
- flags = spdy.DataFlagFin
- s.finishLock.Lock()
- if s.finished {
- s.finishLock.Unlock()
- return ErrWriteClosedStream
- }
- s.finished = true
- s.finishLock.Unlock()
- }
-
- dataFrame := &spdy.DataFrame{
- StreamId: s.streamId,
- Flags: flags,
- Data: data,
- }
-
- debugMessage("(%p) (%d) Writing data frame", s, s.streamId)
- return s.conn.framer.WriteFrame(dataFrame)
-}
-
-// Write writes bytes to a stream, calling WriteData for each call.
-func (s *Stream) Write(data []byte) (n int, err error) {
- err = s.WriteData(data, false)
- if err == nil {
- n = len(data)
- }
- return
-}
-
-// Read reads bytes from a stream; a single read will never get more
-// than what is sent on a single data frame, but multiple calls to
-// Read may get data from the same data frame.
-func (s *Stream) Read(p []byte) (n int, err error) {
- if s.unread == nil {
- select {
- case <-s.closeChan:
- return 0, io.EOF
- case read, ok := <-s.dataChan:
- if !ok {
- return 0, io.EOF
- }
- s.unread = read
- }
- }
- n = copy(p, s.unread)
- if n < len(s.unread) {
- s.unread = s.unread[n:]
- } else {
- s.unread = nil
- }
- return
-}
-
-// ReadData reads an entire data frame and returns the byte array
-// from the data frame. If there is unread data from the result
-// of a Read call, this function will return an ErrUnreadPartialData.
-func (s *Stream) ReadData() ([]byte, error) {
- debugMessage("(%p) Reading data from %d", s, s.streamId)
- if s.unread != nil {
- return nil, ErrUnreadPartialData
- }
- select {
- case <-s.closeChan:
- return nil, io.EOF
- case read, ok := <-s.dataChan:
- if !ok {
- return nil, io.EOF
- }
- return read, nil
- }
-}
-
-func (s *Stream) waitWriteReply() {
- if s.replyCond != nil {
- s.replyCond.L.Lock()
- for !s.replied {
- s.replyCond.Wait()
- }
- s.replyCond.L.Unlock()
- }
-}
-
-// Wait waits for the stream to receive a reply.
-func (s *Stream) Wait() error {
- return s.WaitTimeout(time.Duration(0))
-}
-
-// WaitTimeout waits for the stream to receive a reply or for timeout.
-// When the timeout is reached, ErrTimeout will be returned.
-func (s *Stream) WaitTimeout(timeout time.Duration) error {
- var timeoutChan <-chan time.Time
- if timeout > time.Duration(0) {
- timeoutChan = time.After(timeout)
- }
-
- select {
- case err := <-s.startChan:
- if err != nil {
- return err
- }
- break
- case <-timeoutChan:
- return ErrTimeout
- }
- return nil
-}
-
-// Close closes the stream by sending an empty data frame with the
-// finish flag set, indicating this side is finished with the stream.
-func (s *Stream) Close() error {
- select {
- case <-s.closeChan:
- // Stream is now fully closed
- s.conn.removeStream(s)
- default:
- break
- }
- return s.WriteData([]byte{}, true)
-}
-
-// Reset sends a reset frame, putting the stream into the fully closed state.
-func (s *Stream) Reset() error {
- s.conn.removeStream(s)
- return s.resetStream()
-}
-
-func (s *Stream) resetStream() error {
- // Always call closeRemoteChannels, even if s.finished is already true.
- // This makes it so that stream.Close() followed by stream.Reset() allows
- // stream.Read() to unblock.
- s.closeRemoteChannels()
-
- s.finishLock.Lock()
- if s.finished {
- s.finishLock.Unlock()
- return nil
- }
- s.finished = true
- s.finishLock.Unlock()
-
- resetFrame := &spdy.RstStreamFrame{
- StreamId: s.streamId,
- Status: spdy.Cancel,
- }
- return s.conn.framer.WriteFrame(resetFrame)
-}
-
-// CreateSubStream creates a stream using the current as the parent
-func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) {
- return s.conn.CreateStream(headers, s, fin)
-}
-
-// SetPriority sets the stream priority, does not affect the
-// remote priority of this stream after Open has been called.
-// Valid values are 0 through 7, 0 being the highest priority
-// and 7 the lowest.
-func (s *Stream) SetPriority(priority uint8) {
- s.priority = priority
-}
-
-// SendHeader sends a header frame across the stream
-func (s *Stream) SendHeader(headers http.Header, fin bool) error {
- return s.conn.sendHeaders(headers, s, fin)
-}
-
-// SendReply sends a reply on a stream, only valid to be called once
-// when handling a new stream
-func (s *Stream) SendReply(headers http.Header, fin bool) error {
- if s.replyCond == nil {
- return errors.New("cannot reply on initiated stream")
- }
- s.replyCond.L.Lock()
- defer s.replyCond.L.Unlock()
- if s.replied {
- return nil
- }
-
- err := s.conn.sendReply(headers, s, fin)
- if err != nil {
- return err
- }
-
- s.replied = true
- s.replyCond.Broadcast()
- return nil
-}
-
-// Refuse sends a reset frame with the status refuse, only
-// valid to be called once when handling a new stream. This
-// may be used to indicate that a stream is not allowed
-// when http status codes are not being used.
-func (s *Stream) Refuse() error {
- if s.replied {
- return nil
- }
- s.replied = true
- return s.conn.sendReset(spdy.RefusedStream, s)
-}
-
-// Cancel sends a reset frame with the status canceled. This
-// can be used at any time by the creator of the Stream to
-// indicate the stream is no longer needed.
-func (s *Stream) Cancel() error {
- return s.conn.sendReset(spdy.Cancel, s)
-}
-
-// ReceiveHeader receives a header sent on the other side
-// of the stream. This function will block until a header
-// is received or stream is closed.
-func (s *Stream) ReceiveHeader() (http.Header, error) {
- select {
- case <-s.closeChan:
- break
- case header, ok := <-s.headerChan:
- if !ok {
- return nil, fmt.Errorf("header chan closed")
- }
- return header, nil
- }
- return nil, fmt.Errorf("stream closed")
-}
-
-// Parent returns the parent stream
-func (s *Stream) Parent() *Stream {
- return s.parent
-}
-
-// Headers returns the headers used to create the stream
-func (s *Stream) Headers() http.Header {
- return s.headers
-}
-
-// String returns the string version of stream using the
-// streamId to uniquely identify the stream
-func (s *Stream) String() string {
- return fmt.Sprintf("stream:%d", s.streamId)
-}
-
-// Identifier returns a 32 bit identifier for the stream
-func (s *Stream) Identifier() uint32 {
- return uint32(s.streamId)
-}
-
-// IsFinished returns whether the stream has finished
-// sending data
-func (s *Stream) IsFinished() bool {
- return s.finished
-}
-
-// Implement net.Conn interface
-
-func (s *Stream) LocalAddr() net.Addr {
- return s.conn.conn.LocalAddr()
-}
-
-func (s *Stream) RemoteAddr() net.Addr {
- return s.conn.conn.RemoteAddr()
-}
-
-// TODO set per stream values instead of connection-wide
-
-func (s *Stream) SetDeadline(t time.Time) error {
- return s.conn.conn.SetDeadline(t)
-}
-
-func (s *Stream) SetReadDeadline(t time.Time) error {
- return s.conn.conn.SetReadDeadline(t)
-}
-
-func (s *Stream) SetWriteDeadline(t time.Time) error {
- return s.conn.conn.SetWriteDeadline(t)
-}
-
-func (s *Stream) closeRemoteChannels() {
- s.closeLock.Lock()
- defer s.closeLock.Unlock()
- select {
- case <-s.closeChan:
- default:
- close(s.closeChan)
- }
-}
diff --git a/vendor/github.com/docker/spdystream/utils.go b/vendor/github.com/docker/spdystream/utils.go
deleted file mode 100644
index 1b2c199a4..000000000
--- a/vendor/github.com/docker/spdystream/utils.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package spdystream
-
-import (
- "log"
- "os"
-)
-
-var (
- DEBUG = os.Getenv("DEBUG")
-)
-
-func debugMessage(fmt string, args ...interface{}) {
- if DEBUG != "" {
- log.Printf(fmt, args...)
- }
-}
diff --git a/vendor/github.com/docker/spdystream/LICENSE b/vendor/github.com/magefile/mage/LICENSE
index 9e4bd4dbe..d0632bc14 100644
--- a/vendor/github.com/docker/spdystream/LICENSE
+++ b/vendor/github.com/magefile/mage/LICENSE
@@ -1,4 +1,3 @@
-
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
@@ -176,7 +175,18 @@
END OF TERMS AND CONDITIONS
- Copyright 2014-2015 Docker, Inc.
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2017 the Mage authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/magefile/mage/mg/color.go b/vendor/github.com/magefile/mage/mg/color.go
new file mode 100644
index 000000000..3e2710332
--- /dev/null
+++ b/vendor/github.com/magefile/mage/mg/color.go
@@ -0,0 +1,80 @@
+package mg
+
+// Color is ANSI color type
+type Color int
+
+// If you add/change/remove any items in this constant,
+// you will need to run "stringer -type=Color" in this directory again.
+// NOTE: Please keep the list in an alphabetical order.
+const (
+ Black Color = iota
+ Red
+ Green
+ Yellow
+ Blue
+ Magenta
+ Cyan
+ White
+ BrightBlack
+ BrightRed
+ BrightGreen
+ BrightYellow
+ BrightBlue
+ BrightMagenta
+ BrightCyan
+ BrightWhite
+)
+
+// ansiColor maps the supported terminal colors to their ANSI color codes.
+var ansiColor = map[Color]string{
+ Black: "\u001b[30m",
+ Red: "\u001b[31m",
+ Green: "\u001b[32m",
+ Yellow: "\u001b[33m",
+ Blue: "\u001b[34m",
+ Magenta: "\u001b[35m",
+ Cyan: "\u001b[36m",
+ White: "\u001b[37m",
+ BrightBlack: "\u001b[30;1m",
+ BrightRed: "\u001b[31;1m",
+ BrightGreen: "\u001b[32;1m",
+ BrightYellow: "\u001b[33;1m",
+ BrightBlue: "\u001b[34;1m",
+ BrightMagenta: "\u001b[35;1m",
+ BrightCyan: "\u001b[36;1m",
+ BrightWhite: "\u001b[37;1m",
+}
+
+// AnsiColorReset is an ANSI color code to reset the terminal color.
+const AnsiColorReset = "\033[0m"
+
+// DefaultTargetAnsiColor is a default ANSI color for colorizing targets.
+// It is set to Cyan as an arbitrary color, because it has a neutral meaning
+var DefaultTargetAnsiColor = ansiColor[Cyan]
+
+func toLowerCase(s string) string {
+ // this is a naive implementation
+ // borrowed from https://golang.org/src/strings/strings.go
+ // and only considers alphabetical characters [a-zA-Z]
+ // so that we don't depend on the "strings" package
+ buf := make([]byte, len(s))
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if 'A' <= c && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ buf[i] = c
+ }
+ return string(buf)
+}
+
+func getAnsiColor(color string) (string, bool) {
+ colorLower := toLowerCase(color)
+ for k, v := range ansiColor {
+ colorConstLower := toLowerCase(k.String())
+ if colorConstLower == colorLower {
+ return v, true
+ }
+ }
+ return "", false
+}
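
As a rough illustration only (this snippet is not part of the vendored change), the exported Color constants above stringify through the generated String() method in color_string.go below, which is what getAnsiColor matches MAGEFILE_TARGET_COLOR values against, case-insensitively:

package main

import (
	"fmt"

	"github.com/magefile/mage/mg"
)

func main() {
	// Color constants stringify via the generated String() method; mage
	// matches MAGEFILE_TARGET_COLOR case-insensitively against these names.
	fmt.Println(mg.Cyan, mg.BrightRed) // prints: Cyan BrightRed
}
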
diff --git a/vendor/github.com/magefile/mage/mg/color_string.go b/vendor/github.com/magefile/mage/mg/color_string.go
new file mode 100644
index 000000000..06debca54
--- /dev/null
+++ b/vendor/github.com/magefile/mage/mg/color_string.go
@@ -0,0 +1,38 @@
+// Code generated by "stringer -type=Color"; DO NOT EDIT.
+
+package mg
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Black-0]
+ _ = x[Red-1]
+ _ = x[Green-2]
+ _ = x[Yellow-3]
+ _ = x[Blue-4]
+ _ = x[Magenta-5]
+ _ = x[Cyan-6]
+ _ = x[White-7]
+ _ = x[BrightBlack-8]
+ _ = x[BrightRed-9]
+ _ = x[BrightGreen-10]
+ _ = x[BrightYellow-11]
+ _ = x[BrightBlue-12]
+ _ = x[BrightMagenta-13]
+ _ = x[BrightCyan-14]
+ _ = x[BrightWhite-15]
+}
+
+const _Color_name = "BlackRedGreenYellowBlueMagentaCyanWhiteBrightBlackBrightRedBrightGreenBrightYellowBrightBlueBrightMagentaBrightCyanBrightWhite"
+
+var _Color_index = [...]uint8{0, 5, 8, 13, 19, 23, 30, 34, 39, 50, 59, 70, 82, 92, 105, 115, 126}
+
+func (i Color) String() string {
+ if i < 0 || i >= Color(len(_Color_index)-1) {
+ return "Color(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Color_name[_Color_index[i]:_Color_index[i+1]]
+}
diff --git a/vendor/github.com/magefile/mage/mg/deps.go b/vendor/github.com/magefile/mage/mg/deps.go
new file mode 100644
index 000000000..ad85931f8
--- /dev/null
+++ b/vendor/github.com/magefile/mage/mg/deps.go
@@ -0,0 +1,352 @@
+package mg
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+// funcType indicates the prototype of a build job function.
+type funcType int
+
+// funcTypes
+const (
+ invalidType funcType = iota
+ voidType
+ errorType
+ contextVoidType
+ contextErrorType
+ namespaceVoidType
+ namespaceErrorType
+ namespaceContextVoidType
+ namespaceContextErrorType
+)
+
+var logger = log.New(os.Stderr, "", 0)
+
+type onceMap struct {
+ mu *sync.Mutex
+ m map[string]*onceFun
+}
+
+func (o *onceMap) LoadOrStore(s string, one *onceFun) *onceFun {
+ defer o.mu.Unlock()
+ o.mu.Lock()
+
+ existing, ok := o.m[s]
+ if ok {
+ return existing
+ }
+ o.m[s] = one
+ return one
+}
+
+var onces = &onceMap{
+ mu: &sync.Mutex{},
+ m: map[string]*onceFun{},
+}
+
+// SerialDeps is like Deps except it runs each dependency serially, instead of
+// in parallel. This can be useful for resource intensive dependencies that
+// shouldn't be run at the same time.
+func SerialDeps(fns ...interface{}) {
+ types := checkFns(fns)
+ ctx := context.Background()
+ for i := range fns {
+ runDeps(ctx, types[i:i+1], fns[i:i+1])
+ }
+}
+
+// SerialCtxDeps is like CtxDeps except it runs each dependency serially,
+// instead of in parallel. This can be useful for resource intensive
+// dependencies that shouldn't be run at the same time.
+func SerialCtxDeps(ctx context.Context, fns ...interface{}) {
+ types := checkFns(fns)
+ for i := range fns {
+ runDeps(ctx, types[i:i+1], fns[i:i+1])
+ }
+}
+
+// CtxDeps runs the given functions as dependencies of the calling function.
+// Dependencies must only be of type:
+// func()
+// func() error
+// func(context.Context)
+// func(context.Context) error
+// Or a similar method on a mg.Namespace type.
+//
+// The function calling Deps is guaranteed that all dependent functions will be
+// run exactly once when Deps returns. Dependent functions may in turn declare
+// their own dependencies using Deps. Each dependency is run in its own
+// goroutine. Each function is given the provided context if the function
+// prototype allows for it.
+func CtxDeps(ctx context.Context, fns ...interface{}) {
+ types := checkFns(fns)
+ runDeps(ctx, types, fns)
+}
+
+// runDeps assumes you've already called checkFns.
+func runDeps(ctx context.Context, types []funcType, fns []interface{}) {
+ mu := &sync.Mutex{}
+ var errs []string
+ var exit int
+ wg := &sync.WaitGroup{}
+ for i, f := range fns {
+ fn := addDep(ctx, types[i], f)
+ wg.Add(1)
+ go func() {
+ defer func() {
+ if v := recover(); v != nil {
+ mu.Lock()
+ if err, ok := v.(error); ok {
+ exit = changeExit(exit, ExitStatus(err))
+ } else {
+ exit = changeExit(exit, 1)
+ }
+ errs = append(errs, fmt.Sprint(v))
+ mu.Unlock()
+ }
+ wg.Done()
+ }()
+ if err := fn.run(); err != nil {
+ mu.Lock()
+ errs = append(errs, fmt.Sprint(err))
+ exit = changeExit(exit, ExitStatus(err))
+ mu.Unlock()
+ }
+ }()
+ }
+
+ wg.Wait()
+ if len(errs) > 0 {
+ panic(Fatal(exit, strings.Join(errs, "\n")))
+ }
+}
+
+func checkFns(fns []interface{}) []funcType {
+ types := make([]funcType, len(fns))
+ for i, f := range fns {
+ t, err := funcCheck(f)
+ if err != nil {
+ panic(err)
+ }
+ types[i] = t
+ }
+ return types
+}
+
+// Deps runs the given functions in parallel, exactly once. Dependencies must
+// only be of type:
+// func()
+// func() error
+// func(context.Context)
+// func(context.Context) error
+// Or a similar method on a mg.Namespace type.
+//
+// This is a way to build up a tree of dependencies with each dependency
+// defining its own dependencies. Functions must have the same signature as a
+// Mage target, i.e. optional context argument, optional error return.
+func Deps(fns ...interface{}) {
+ CtxDeps(context.Background(), fns...)
+}
+
+func changeExit(old, new int) int {
+ if new == 0 {
+ return old
+ }
+ if old == 0 {
+ return new
+ }
+ if old == new {
+ return old
+ }
+ // both different and both non-zero, just set
+ // exit to 1. Nothing more we can do.
+ return 1
+}
+
+func addDep(ctx context.Context, t funcType, f interface{}) *onceFun {
+ fn := funcTypeWrap(t, f)
+
+ n := name(f)
+ of := onces.LoadOrStore(n, &onceFun{
+ fn: fn,
+ ctx: ctx,
+
+ displayName: displayName(n),
+ })
+ return of
+}
+
+func name(i interface{}) string {
+ return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()
+}
+
+func displayName(name string) string {
+ splitByPackage := strings.Split(name, ".")
+ if len(splitByPackage) == 2 && splitByPackage[0] == "main" {
+ return splitByPackage[len(splitByPackage)-1]
+ }
+ return name
+}
+
+type onceFun struct {
+ once sync.Once
+ fn func(context.Context) error
+ ctx context.Context
+ err error
+
+ displayName string
+}
+
+func (o *onceFun) run() error {
+ o.once.Do(func() {
+ if Verbose() {
+ logger.Println("Running dependency:", o.displayName)
+ }
+ o.err = o.fn(o.ctx)
+ })
+ return o.err
+}
+
+// causeLocation returns the location of the mg.Deps invocation where the error originates.
+func causeLocation() string {
+ pcs := make([]uintptr, 1)
+ // 6 skips causeLocation, funcCheck, checkFns, mg.CtxDeps, mg.Deps in stacktrace
+ if runtime.Callers(6, pcs) != 1 {
+ return "<unknown>"
+ }
+ frames := runtime.CallersFrames(pcs)
+ frame, _ := frames.Next()
+ if frame.Function == "" && frame.File == "" && frame.Line == 0 {
+ return "<unknown>"
+ }
+ return fmt.Sprintf("%s %s:%d", frame.Function, frame.File, frame.Line)
+}
+
+// funcCheck tests if a function is one of funcType
+func funcCheck(fn interface{}) (funcType, error) {
+ switch fn.(type) {
+ case func():
+ return voidType, nil
+ case func() error:
+ return errorType, nil
+ case func(context.Context):
+ return contextVoidType, nil
+ case func(context.Context) error:
+ return contextErrorType, nil
+ }
+
+ err := fmt.Errorf("Invalid type for dependent function: %T. Dependencies must be func(), func() error, func(context.Context), func(context.Context) error, or the same method on an mg.Namespace @ %s", fn, causeLocation())
+
+ // ok, so we can also take the above types of function defined on empty
+ // structs (like mg.Namespace). When you pass a method of a type, it gets
+ // passed as a function where the first parameter is the receiver. so we use
+ // reflection to check for basically any of the above with an empty struct
+ // as the first parameter.
+
+ t := reflect.TypeOf(fn)
+ if t.Kind() != reflect.Func {
+ return invalidType, err
+ }
+
+ if t.NumOut() > 1 {
+ return invalidType, err
+ }
+ if t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(err) {
+ return invalidType, err
+ }
+
+ // 1 or 2 arguments: either just the struct, or the struct and a context.
+ if t.NumIn() == 0 || t.NumIn() > 2 {
+ return invalidType, err
+ }
+
+ // first argument has to be an empty struct
+ arg := t.In(0)
+ if arg.Kind() != reflect.Struct {
+ return invalidType, err
+ }
+ if arg.NumField() != 0 {
+ return invalidType, err
+ }
+ if t.NumIn() == 1 {
+ if t.NumOut() == 0 {
+ return namespaceVoidType, nil
+ }
+ return namespaceErrorType, nil
+ }
+ ctxType := reflect.TypeOf(context.Background())
+ if t.In(1) == ctxType {
+ return invalidType, err
+ }
+
+ if t.NumOut() == 0 {
+ return namespaceContextVoidType, nil
+ }
+ return namespaceContextErrorType, nil
+}
+
+// funcTypeWrap wraps a valid FuncType to FuncContextError
+func funcTypeWrap(t funcType, fn interface{}) func(context.Context) error {
+ switch f := fn.(type) {
+ case func():
+ return func(context.Context) error {
+ f()
+ return nil
+ }
+ case func() error:
+ return func(context.Context) error {
+ return f()
+ }
+ case func(context.Context):
+ return func(ctx context.Context) error {
+ f(ctx)
+ return nil
+ }
+ case func(context.Context) error:
+ return f
+ }
+ args := []reflect.Value{reflect.ValueOf(struct{}{})}
+ switch t {
+ case namespaceVoidType:
+ return func(context.Context) error {
+ v := reflect.ValueOf(fn)
+ v.Call(args)
+ return nil
+ }
+ case namespaceErrorType:
+ return func(context.Context) error {
+ v := reflect.ValueOf(fn)
+ ret := v.Call(args)
+ val := ret[0].Interface()
+ if val == nil {
+ return nil
+ }
+ return val.(error)
+ }
+ case namespaceContextVoidType:
+ return func(ctx context.Context) error {
+ v := reflect.ValueOf(fn)
+ v.Call(append(args, reflect.ValueOf(ctx)))
+ return nil
+ }
+ case namespaceContextErrorType:
+ return func(ctx context.Context) error {
+ v := reflect.ValueOf(fn)
+ ret := v.Call(append(args, reflect.ValueOf(ctx)))
+ val := ret[0].Interface()
+ if val == nil {
+ return nil
+ }
+ return val.(error)
+ }
+ default:
+ panic(fmt.Errorf("Don't know how to deal with dep of type %T", fn))
+ }
+}
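
A minimal magefile sketch of the Deps/CtxDeps contract documented above (accepted signatures, run-exactly-once, parallel execution); the Build, Generate, and Lint targets here are hypothetical examples, not part of this change:

// +build mage

package main

import (
	"context"

	"github.com/magefile/mage/mg"
)

// Build runs Generate and Lint in parallel, each exactly once, before
// continuing with its own work.
func Build(ctx context.Context) error {
	mg.CtxDeps(ctx, Generate, Lint)
	// build steps would go here
	return nil
}

// Generate matches the func() error prototype accepted by Deps.
func Generate() error { return nil }

// Lint matches the plain func() prototype accepted by Deps.
func Lint() {}
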
diff --git a/vendor/github.com/magefile/mage/mg/errors.go b/vendor/github.com/magefile/mage/mg/errors.go
new file mode 100644
index 000000000..2dd780fe3
--- /dev/null
+++ b/vendor/github.com/magefile/mage/mg/errors.go
@@ -0,0 +1,51 @@
+package mg
+
+import (
+ "errors"
+ "fmt"
+)
+
+type fatalErr struct {
+ code int
+ error
+}
+
+func (f fatalErr) ExitStatus() int {
+ return f.code
+}
+
+type exitStatus interface {
+ ExitStatus() int
+}
+
+// Fatal returns an error that will cause mage to print out the
+// given args and exit with the given exit code.
+func Fatal(code int, args ...interface{}) error {
+ return fatalErr{
+ code: code,
+ error: errors.New(fmt.Sprint(args...)),
+ }
+}
+
+// Fatalf returns an error that will cause mage to print out the
+// given message and exit with the given exit code.
+func Fatalf(code int, format string, args ...interface{}) error {
+ return fatalErr{
+ code: code,
+ error: fmt.Errorf(format, args...),
+ }
+}
+
+// ExitStatus queries the error for an exit status. If the error is nil, it
+// returns 0. If the error does not implement ExitStatus() int, it returns 1.
+// Otherwise it returns the value from ExitStatus().
+func ExitStatus(err error) int {
+ if err == nil {
+ return 0
+ }
+ exit, ok := err.(exitStatus)
+ if !ok {
+ return 1
+ }
+ return exit.ExitStatus()
+}
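
A short hedged sketch of how Fatal/Fatalf and ExitStatus fit together; the Release target and TAG variable are illustrative assumptions. Returning mg.Fatalf from a target makes mage exit with that code, because the returned error implements ExitStatus() int:

// +build mage

package main

import (
	"os"

	"github.com/magefile/mage/mg"
)

// Release fails with exit code 2 when TAG is unset; mg.ExitStatus on the
// returned error would report 2.
func Release() error {
	if os.Getenv("TAG") == "" {
		return mg.Fatalf(2, "TAG must be set to release")
	}
	return nil
}
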
diff --git a/vendor/github.com/magefile/mage/mg/runtime.go b/vendor/github.com/magefile/mage/mg/runtime.go
new file mode 100644
index 000000000..9a8de12ce
--- /dev/null
+++ b/vendor/github.com/magefile/mage/mg/runtime.go
@@ -0,0 +1,136 @@
+package mg
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+)
+
+// CacheEnv is the environment variable that users may set to change the
+// location where mage stores its compiled binaries.
+const CacheEnv = "MAGEFILE_CACHE"
+
+// VerboseEnv is the environment variable that indicates the user requested
+// verbose mode when running a magefile.
+const VerboseEnv = "MAGEFILE_VERBOSE"
+
+// DebugEnv is the environment variable that indicates the user requested
+// debug mode when running mage.
+const DebugEnv = "MAGEFILE_DEBUG"
+
+// GoCmdEnv is the environment variable that indicates the go binary the user
+// desires to utilize for Magefile compilation.
+const GoCmdEnv = "MAGEFILE_GOCMD"
+
+// IgnoreDefaultEnv is the environment variable that indicates the user requested
+// to ignore the default target specified in the magefile.
+const IgnoreDefaultEnv = "MAGEFILE_IGNOREDEFAULT"
+
+// HashFastEnv is the environment variable that indicates the user requested to
+// use a quick hash of magefiles to determine whether or not the magefile binary
+// needs to be rebuilt. This results in faster runtimes, but means that mage
+// will fail to rebuild if a dependency has changed. To force a rebuild, run
+// mage with the -f flag.
+const HashFastEnv = "MAGEFILE_HASHFAST"
+
+// EnableColorEnv is the environment variable that indicates the user is using
+// a terminal which supports a color output. The default is false for backwards
+// compatibility. When the value is true and the detected terminal does support colors
+// then the list of mage targets will be displayed in ANSI color. When the value
+// is true but the detected terminal does not support colors, then the list of
+// mage targets will be displayed in the default colors (e.g. black and white).
+const EnableColorEnv = "MAGEFILE_ENABLE_COLOR"
+
+// TargetColorEnv is the environment variable that indicates which ANSI color
+// should be used to colorize mage targets. This is only applicable when
+// the MAGEFILE_ENABLE_COLOR environment variable is true.
+// The supported ANSI color names are any of these:
+// - Black
+// - Red
+// - Green
+// - Yellow
+// - Blue
+// - Magenta
+// - Cyan
+// - White
+// - BrightBlack
+// - BrightRed
+// - BrightGreen
+// - BrightYellow
+// - BrightBlue
+// - BrightMagenta
+// - BrightCyan
+// - BrightWhite
+const TargetColorEnv = "MAGEFILE_TARGET_COLOR"
+
+// Verbose reports whether a magefile was run with the verbose flag.
+func Verbose() bool {
+ b, _ := strconv.ParseBool(os.Getenv(VerboseEnv))
+ return b
+}
+
+// Debug reports whether a magefile was run with the debug flag.
+func Debug() bool {
+ b, _ := strconv.ParseBool(os.Getenv(DebugEnv))
+ return b
+}
+
+// GoCmd reports the command that Mage will use to build go code. By default mage runs
+// the "go" binary in the PATH.
+func GoCmd() string {
+ if cmd := os.Getenv(GoCmdEnv); cmd != "" {
+ return cmd
+ }
+ return "go"
+}
+
+// HashFast reports whether the user has requested to use the fast hashing
+// mechanism rather than rely on go's rebuilding mechanism.
+func HashFast() bool {
+ b, _ := strconv.ParseBool(os.Getenv(HashFastEnv))
+ return b
+}
+
+// IgnoreDefault reports whether the user has requested to ignore the default target
+// in the magefile.
+func IgnoreDefault() bool {
+ b, _ := strconv.ParseBool(os.Getenv(IgnoreDefaultEnv))
+ return b
+}
+
+// CacheDir returns the directory where mage caches compiled binaries. It
+// defaults to $HOME/.magefile, but may be overridden by the MAGEFILE_CACHE
+// environment variable.
+func CacheDir() string {
+ d := os.Getenv(CacheEnv)
+ if d != "" {
+ return d
+ }
+ switch runtime.GOOS {
+ case "windows":
+ return filepath.Join(os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"), "magefile")
+ default:
+ return filepath.Join(os.Getenv("HOME"), ".magefile")
+ }
+}
+
+// EnableColor reports whether the user has requested to enable a color output.
+func EnableColor() bool {
+ b, _ := strconv.ParseBool(os.Getenv(EnableColorEnv))
+ return b
+}
+
+// TargetColor returns the ANSI color code configured for colorizing mage targets.
+func TargetColor() string {
+ s, exists := os.LookupEnv(TargetColorEnv)
+ if exists {
+ if c, ok := getAnsiColor(s); ok {
+ return c
+ }
+ }
+ return DefaultTargetAnsiColor
+}
+
+// Namespace allows for the grouping of similar commands
+type Namespace struct{}
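
A brief, hypothetical sketch of the Namespace type and the MAGEFILE_VERBOSE knob documented above; the Docker namespace and its targets are assumptions for illustration (invoked as "mage docker:build" and "mage docker:push"):

// +build mage

package main

import "github.com/magefile/mage/mg"

// Docker groups container-related targets under one namespace.
type Docker mg.Namespace

func (Docker) Build() error {
	if mg.Verbose() { // true when MAGEFILE_VERBOSE is set
		// extra logging would go here
	}
	return nil
}

func (Docker) Push() error { return nil }
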
diff --git a/vendor/github.com/magefile/mage/sh/cmd.go b/vendor/github.com/magefile/mage/sh/cmd.go
new file mode 100644
index 000000000..06af62de2
--- /dev/null
+++ b/vendor/github.com/magefile/mage/sh/cmd.go
@@ -0,0 +1,177 @@
+package sh
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "os/exec"
+ "strings"
+
+ "github.com/magefile/mage/mg"
+)
+
+// RunCmd returns a function that will call Run with the given command. This is
+// useful for creating command aliases to make your scripts easier to read, like
+// this:
+//
+// // in a helper file somewhere
+// var g0 = sh.RunCmd("go") // go is a keyword :(
+//
+// // somewhere in your main code
+// if err := g0("install", "github.com/gohugo/hugo"); err != nil {
+// return err
+// }
+//
+// Args passed to command get baked in as args to the command when you run it.
+// Any args passed in when you run the returned function will be appended to the
+// original args. For example, this is equivalent to the above:
+//
+// var goInstall = sh.RunCmd("go", "install")
+// goInstall("github.com/gohugo/hugo")
+//
+// RunCmd uses Exec underneath, so see those docs for more details.
+func RunCmd(cmd string, args ...string) func(args ...string) error {
+ return func(args2 ...string) error {
+ return Run(cmd, append(args, args2...)...)
+ }
+}
+
+// OutCmd is like RunCmd except the returned function returns the output of
+// the command.
+func OutCmd(cmd string, args ...string) func(args ...string) (string, error) {
+ return func(args2 ...string) (string, error) {
+ return Output(cmd, append(args, args2...)...)
+ }
+}
+
+// Run is like RunWith, but doesn't specify any environment variables.
+func Run(cmd string, args ...string) error {
+ return RunWith(nil, cmd, args...)
+}
+
+// RunV is like Run, but always sends the command's stdout to os.Stdout.
+func RunV(cmd string, args ...string) error {
+ _, err := Exec(nil, os.Stdout, os.Stderr, cmd, args...)
+ return err
+}
+
+// RunWith runs the given command, directing stderr to this program's stderr and
+// printing stdout to stdout if mage was run with -v. It adds env to the
+// environment variables for the command being run. Environment variables should
+// be in the format name=value.
+func RunWith(env map[string]string, cmd string, args ...string) error {
+ var output io.Writer
+ if mg.Verbose() {
+ output = os.Stdout
+ }
+ _, err := Exec(env, output, os.Stderr, cmd, args...)
+ return err
+}
+
+// RunWithV is like RunWith, but always sends the command's stdout to os.Stdout.
+func RunWithV(env map[string]string, cmd string, args ...string) error {
+ _, err := Exec(env, os.Stdout, os.Stderr, cmd, args...)
+ return err
+}
+
+// Output runs the command and returns the text from stdout.
+func Output(cmd string, args ...string) (string, error) {
+ buf := &bytes.Buffer{}
+ _, err := Exec(nil, buf, os.Stderr, cmd, args...)
+ return strings.TrimSuffix(buf.String(), "\n"), err
+}
+
+// OutputWith is like RunWith, but returns what is written to stdout.
+func OutputWith(env map[string]string, cmd string, args ...string) (string, error) {
+ buf := &bytes.Buffer{}
+ _, err := Exec(env, buf, os.Stderr, cmd, args...)
+ return strings.TrimSuffix(buf.String(), "\n"), err
+}
+
+// Exec executes the command, piping its stderr to mage's stderr and
+// piping its stdout to the given writer. If the command fails, it will return
+// an error that, if returned from a target or mg.Deps call, will cause mage to
+// exit with the same code as the command failed with. Env is a list of
+// environment variables to set when running the command, these override the
+// current environment variables set (which are also passed to the command). cmd
+// and args may include references to environment variables in $FOO format, in
+// which case these will be expanded before the command is run.
+//
+// Ran reports if the command ran (rather than was not found or not executable).
+// Code reports the exit code the command returned if it ran. If err == nil, ran
+// is always true and code is always 0.
+func Exec(env map[string]string, stdout, stderr io.Writer, cmd string, args ...string) (ran bool, err error) {
+ expand := func(s string) string {
+ s2, ok := env[s]
+ if ok {
+ return s2
+ }
+ return os.Getenv(s)
+ }
+ cmd = os.Expand(cmd, expand)
+ for i := range args {
+ args[i] = os.Expand(args[i], expand)
+ }
+ ran, code, err := run(env, stdout, stderr, cmd, args...)
+ if err == nil {
+ return true, nil
+ }
+ if ran {
+ return ran, mg.Fatalf(code, `running "%s %s" failed with exit code %d`, cmd, strings.Join(args, " "), code)
+ }
+ return ran, fmt.Errorf(`failed to run "%s %s": %v`, cmd, strings.Join(args, " "), err)
+}
+
+func run(env map[string]string, stdout, stderr io.Writer, cmd string, args ...string) (ran bool, code int, err error) {
+ c := exec.Command(cmd, args...)
+ c.Env = os.Environ()
+ for k, v := range env {
+ c.Env = append(c.Env, k+"="+v)
+ }
+ c.Stderr = stderr
+ c.Stdout = stdout
+ c.Stdin = os.Stdin
+ log.Println("exec:", cmd, strings.Join(args, " "))
+ err = c.Run()
+ return CmdRan(err), ExitStatus(err), err
+}
+
+// CmdRan examines the error to determine if it was generated as a result of a
+// command running via os/exec.Command. If the error is nil, or the command ran
+// (even if it exited with a non-zero exit code), CmdRan reports true. If the
+// error is an unrecognized type, or it is an error from exec.Command that says
+// the command failed to run (usually due to the command not existing or not
+// being executable), it reports false.
+func CmdRan(err error) bool {
+ if err == nil {
+ return true
+ }
+ ee, ok := err.(*exec.ExitError)
+ if ok {
+ return ee.Exited()
+ }
+ return false
+}
+
+type exitStatus interface {
+ ExitStatus() int
+}
+
+// ExitStatus returns the exit status of the error if it is an exec.ExitError
+// or if it implements ExitStatus() int.
+// It returns 0 if the error is nil, and 1 for any other error.
+func ExitStatus(err error) int {
+ if err == nil {
+ return 0
+ }
+ if e, ok := err.(exitStatus); ok {
+ return e.ExitStatus()
+ }
+ if e, ok := err.(*exec.ExitError); ok {
+ if ex, ok := e.Sys().(exitStatus); ok {
+ return ex.ExitStatus()
+ }
+ }
+ return 1
+}
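
A hedged usage sketch of the sh helpers above inside a magefile target; the git invocation and the -ldflags value are illustrative assumptions. Output captures stdout, RunWith injects environment variables, and Exec expands $FOO references before running the command:

// +build mage

package main

import "github.com/magefile/mage/sh"

// Build stamps the binary with the current git revision.
func Build() error {
	rev, err := sh.Output("git", "rev-parse", "--short", "HEAD")
	if err != nil {
		return err
	}
	env := map[string]string{"REV": rev}
	// $REV is expanded from env before the command runs.
	return sh.RunWith(env, "go", "build", "-ldflags", "-X main.revision=$REV", "./...")
}
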
diff --git a/vendor/github.com/magefile/mage/sh/helpers.go b/vendor/github.com/magefile/mage/sh/helpers.go
new file mode 100644
index 000000000..f5d20a271
--- /dev/null
+++ b/vendor/github.com/magefile/mage/sh/helpers.go
@@ -0,0 +1,40 @@
+package sh
+
+import (
+ "fmt"
+ "io"
+ "os"
+)
+
+// Rm removes the given file or directory even if non-empty. It will not return
+// an error if the target doesn't exist, only if the target cannot be removed.
+func Rm(path string) error {
+ err := os.RemoveAll(path)
+ if err == nil || os.IsNotExist(err) {
+ return nil
+ }
+ return fmt.Errorf(`failed to remove %s: %v`, path, err)
+}
+
+// Copy robustly copies the source file to the destination, overwriting the destination if necessary.
+func Copy(dst string, src string) error {
+ from, err := os.Open(src)
+ if err != nil {
+ return fmt.Errorf(`can't copy %s: %v`, src, err)
+ }
+ defer from.Close()
+ finfo, err := from.Stat()
+ if err != nil {
+ return fmt.Errorf(`can't stat %s: %v`, src, err)
+ }
+ to, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, finfo.Mode())
+ if err != nil {
+ return fmt.Errorf(`can't copy to %s: %v`, dst, err)
+ }
+ defer to.Close()
+ _, err = io.Copy(to, from)
+ if err != nil {
+ return fmt.Errorf(`error copying %s to %s: %v`, src, dst, err)
+ }
+ return nil
+}
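
A minimal sketch of Rm and Copy in a packaging target, assuming hypothetical dist/ and bin/ paths:

// +build mage

package main

import (
	"os"

	"github.com/magefile/mage/sh"
)

// Package rebuilds dist/ from scratch and stages the compiled binary.
func Package() error {
	if err := sh.Rm("dist"); err != nil {
		return err
	}
	if err := os.MkdirAll("dist", 0755); err != nil {
		return err
	}
	return sh.Copy("dist/app", "bin/app")
}
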
diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go
index b9f2d1802..9fb801162 100644
--- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go
+++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/tcp/tcp.go
@@ -5,6 +5,7 @@ import (
"io"
"net"
"os"
+ "strconv"
"sync"
"github.com/rootless-containers/rootlesskit/pkg/port"
@@ -12,7 +13,7 @@ import (
)
func Run(socketPath string, spec port.Spec, stopCh <-chan struct{}, logWriter io.Writer) error {
- ln, err := net.Listen("tcp", fmt.Sprintf("%s:%d", spec.ParentIP, spec.ParentPort))
+ ln, err := net.Listen("tcp", net.JoinHostPort(spec.ParentIP, strconv.Itoa(spec.ParentPort)))
if err != nil {
fmt.Fprintf(logWriter, "listen: %v\n", err)
return err
diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go
index d8f646b5d..fbff2b081 100644
--- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go
+++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udp.go
@@ -1,10 +1,10 @@
package udp
import (
- "fmt"
"io"
"net"
"os"
+ "strconv"
"github.com/pkg/errors"
@@ -14,7 +14,7 @@ import (
)
func Run(socketPath string, spec port.Spec, stopCh <-chan struct{}, logWriter io.Writer) error {
- addr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", spec.ParentIP, spec.ParentPort))
+ addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(spec.ParentIP, strconv.Itoa(spec.ParentPort)))
if err != nil {
return err
}
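
The tcp.go and udp.go hunks above replace fmt.Sprintf with net.JoinHostPort so that IPv6 parent addresses are bracketed correctly; a tiny standalone sketch of the difference:

package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	ip, port := "::1", 8080
	naive := fmt.Sprintf("%s:%d", ip, port)          // "::1:8080" — ambiguous for IPv6
	safe := net.JoinHostPort(ip, strconv.Itoa(port)) // "[::1]:8080"
	fmt.Println(naive, safe)
}
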
diff --git a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/portutil/portutil.go b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/portutil/portutil.go
index 1c531cac8..a885a76ca 100644
--- a/vendor/github.com/rootless-containers/rootlesskit/pkg/port/portutil/portutil.go
+++ b/vendor/github.com/rootless-containers/rootlesskit/pkg/port/portutil/portutil.go
@@ -4,76 +4,135 @@ import (
"net"
"strconv"
"strings"
+ "text/scanner"
"github.com/pkg/errors"
"github.com/rootless-containers/rootlesskit/pkg/port"
)
-// ParsePortSpec parses a Docker-like representation of PortSpec.
+// ParsePortSpec parses a Docker-like representation of PortSpec, but with
+// support for both "parent IP" and "child IP" (optional);
// e.g. "127.0.0.1:8080:80/tcp", or "127.0.0.1:8080:10.0.2.100:80/tcp"
-func ParsePortSpec(s string) (*port.Spec, error) {
- splitBySlash := strings.SplitN(s, "/", 2)
- if len(splitBySlash) != 2 {
- return nil, errors.Errorf("unexpected PortSpec string: %q", s)
+//
+// Format is as follows:
+//
+// <parent IP>:<parent port>[:<child IP>]:<child port>/<proto>
+//
+// Note that, since the child IP is optional, the format contains either 4 or 5
+// components. When using IPv6 addresses, the address must be wrapped in square brackets
+// to prevent the colons being mistaken for delimiters. For example:
+//
+// [::1]:8080:[::2]:80/udp
+func ParsePortSpec(portSpec string) (*port.Spec, error) {
+ const (
+ parentIP = iota
+ parentPort = iota
+ childIP = iota
+ childPort = iota
+ proto = iota
+ )
+
+ var (
+ s scanner.Scanner
+ err error
+ parts = make([]string, 5)
+ index = parentIP
+ delimiter = ':'
+ )
+
+ // First get the "proto" and the child port at the end. These parts are
+ // required, whereas the child IP is optional. Removing them first makes
+ // it easier to parse the remaining parts, as otherwise the third part
+ // could be _either_ an IP-address _or_ a port.
+
+ // Get the proto
+ protoPos := strings.LastIndex(portSpec, "/")
+ if protoPos < 0 {
+ return nil, errors.Errorf("missing proto in PortSpec string: %q", portSpec)
}
- proto := splitBySlash[1]
- switch proto {
- case "tcp", "udp", "sctp":
- default:
- return nil, errors.Errorf("unexpected Proto in PortSpec string: %q", s)
+ parts[proto] = portSpec[protoPos+1:]
+ err = validateProto(parts[proto])
+ if err != nil {
+ return nil, errors.Wrapf(err, "invalid PortSpec string: %q", portSpec)
}
- splitByColon := strings.SplitN(splitBySlash[0], ":", 4)
- switch len(splitByColon) {
- case 3, 4:
- default:
- return nil, errors.Errorf("unexpected PortSpec string: %q", s)
+ // Get the child port (the last component before the proto)
+ portPos := strings.LastIndex(portSpec, ":")
+ if portPos < 0 {
+ return nil, errors.Errorf("unexpected PortSpec string: %q", portSpec)
}
+ parts[childPort] = portSpec[portPos+1 : protoPos]
+
+ // Scan the remainder "<IP-address>:<port>[:<IP-address>]"
+ s.Init(strings.NewReader(portSpec[:portPos]))
+
+ for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
+ if index > childPort {
+ return nil, errors.Errorf("unexpected PortSpec string: %q", portSpec)
+ }
- parentIP := splitByColon[0]
- if net.IP(parentIP) == nil {
- return nil, errors.Errorf("unexpected ParentIP in PortSpec string: %q", s)
+ switch tok {
+ case '[':
+ // Start of IPv6 IP-address; value ends at closing bracket (])
+ delimiter = ']'
+ continue
+ case delimiter:
+ if delimiter == ']' {
+ // End of IPv6 IP-address
+ delimiter = ':'
+ // Skip the next token, which should be a colon delimiter (:)
+ tok = s.Scan()
+ }
+ index++
+ continue
+ default:
+ parts[index] += s.TokenText()
+ }
}
- parentPort, err := strconv.Atoi(splitByColon[1])
- if err != nil {
- return nil, errors.Wrapf(err, "unexpected ParentPort in PortSpec string: %q", s)
+ if parts[parentIP] != "" && net.ParseIP(parts[parentIP]) == nil {
+ return nil, errors.Errorf("unexpected ParentIP in PortSpec string: %q", portSpec)
+ }
+ if parts[childIP] != "" && net.ParseIP(parts[childIP]) == nil {
+ return nil, errors.Errorf("unexpected ParentIP in PortSpec string: %q", portSpec)
}
- var childIP string
- if len(splitByColon) == 4 {
- childIP = splitByColon[2]
- if net.IP(childIP) == nil {
- return nil, errors.Errorf("unexpected ChildIP in PortSpec string: %q", s)
- }
+ ps := &port.Spec{
+ Proto: parts[proto],
+ ParentIP: parts[parentIP],
+ ChildIP: parts[childIP],
}
- childPort, err := strconv.Atoi(splitByColon[len(splitByColon)-1])
+ ps.ParentPort, err = strconv.Atoi(parts[parentPort])
if err != nil {
- return nil, errors.Wrapf(err, "unexpected ChildPort in PortSpec string: %q", s)
+ return nil, errors.Wrapf(err, "unexpected ChildPort in PortSpec string: %q", portSpec)
}
- return &port.Spec{
- Proto: proto,
- ParentIP: parentIP,
- ParentPort: parentPort,
- ChildIP: childIP,
- ChildPort: childPort,
- }, nil
+ ps.ChildPort, err = strconv.Atoi(parts[childPort])
+ if err != nil {
+ return nil, errors.Wrapf(err, "unexpected ParentPort in PortSpec string: %q", portSpec)
+ }
+
+ return ps, nil
}
// ValidatePortSpec validates *port.Spec.
// existingPorts can be optionally passed for detecting conflicts.
func ValidatePortSpec(spec port.Spec, existingPorts map[int]*port.Status) error {
- if spec.Proto != "tcp" && spec.Proto != "udp" {
- return errors.Errorf("unknown proto: %q", spec.Proto)
+ if err := validateProto(spec.Proto); err != nil {
+ return err
}
if spec.ParentIP != "" {
if net.ParseIP(spec.ParentIP) == nil {
return errors.Errorf("invalid ParentIP: %q", spec.ParentIP)
}
}
+ if spec.ChildIP != "" {
+ if net.ParseIP(spec.ChildIP) == nil {
+ return errors.Errorf("invalid ChildIP: %q", spec.ChildIP)
+ }
+ }
if spec.ParentPort <= 0 || spec.ParentPort > 65535 {
return errors.Errorf("invalid ParentPort: %q", spec.ParentPort)
}
@@ -90,3 +149,12 @@ func ValidatePortSpec(spec port.Spec, existingPorts map[int]*port.Status) error
}
return nil
}
+
+func validateProto(proto string) error {
+ switch proto {
+ case "tcp", "udp", "sctp":
+ return nil
+ default:
+ return errors.Errorf("unknown proto: %q", proto)
+ }
+}
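
A rough usage sketch of the reworked parser, assuming the import path shown in the diff header; it exercises the bracketed IPv6 form described in the new doc comment, and the expected field values are an assumption based on a reading of the code above:

package main

import (
	"fmt"

	"github.com/rootless-containers/rootlesskit/pkg/port/portutil"
)

func main() {
	// Brackets keep the IPv6 colons from being read as field delimiters.
	spec, err := portutil.ParsePortSpec("[::1]:8080:[::2]:80/udp")
	if err != nil {
		panic(err)
	}
	// Expected fields: Proto=udp ParentIP=::1 ParentPort=8080 ChildIP=::2 ChildPort=80
	fmt.Printf("%+v\n", *spec)
}
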
diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml
index 5e20aa414..e6ee8b3ab 100644
--- a/vendor/github.com/sirupsen/logrus/.travis.yml
+++ b/vendor/github.com/sirupsen/logrus/.travis.yml
@@ -4,14 +4,11 @@ git:
depth: 1
env:
- GO111MODULE=on
-go: [1.13.x, 1.14.x]
-os: [linux, osx]
+go: 1.15.x
+os: linux
install:
- ./travis/install.sh
script:
- - ./travis/cross_build.sh
- - ./travis/lint.sh
- - export GOMAXPROCS=4
- - export GORACE=halt_on_error=1
- - go test -race -v ./...
- - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./... ; fi
+ - go run mage.go -v crossBuild
+ - go run mage.go lint
+ - go run mage.go test
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
index 584026d67..311f2c339 100644
--- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md
+++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
@@ -1,3 +1,30 @@
+# 1.8.0
+
+Correct versioning number replacing v1.7.1.
+
+# 1.7.1
+
+Beware this release has introduced a new public API and its semver is therefore incorrect.
+
+Code quality:
+ * use go 1.15 in travis
+ * use magefile as task runner
+
+Fixes:
+ * small fixes for the new go 1.13 error formatting system
+ * Fix for a long-standing race condition with mutating data hooks
+
+Features:
+ * build support for zos
+
+# 1.7.0
+Fixes:
+ * the dependency toward a windows terminal library has been removed
+
+Features:
+ * a new buffer pool management API has been added
+ * a set of `<LogLevel>Fn()` functions have been added
+
# 1.6.0
Fixes:
* end of line cleanup
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md
index 5796706db..5152b6aa4 100644
--- a/vendor/github.com/sirupsen/logrus/README.md
+++ b/vendor/github.com/sirupsen/logrus/README.md
@@ -402,7 +402,7 @@ func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
// source of the official loggers.
serialized, err := json.Marshal(entry.Data)
if err != nil {
- return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err)
}
return append(serialized, '\n'), nil
}
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
index 5a5cbfe7c..c968f6344 100644
--- a/vendor/github.com/sirupsen/logrus/entry.go
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -78,6 +78,14 @@ func NewEntry(logger *Logger) *Entry {
}
}
+func (entry *Entry) Dup() *Entry {
+ data := make(Fields, len(entry.Data))
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err}
+}
+
// Returns the bytes representation of this entry from the formatter.
func (entry *Entry) Bytes() ([]byte, error) {
return entry.Logger.Formatter.Format(entry)
@@ -123,11 +131,9 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
for k, v := range fields {
isErrField := false
if t := reflect.TypeOf(v); t != nil {
- switch t.Kind() {
- case reflect.Func:
+ switch {
+ case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func:
isErrField = true
- case reflect.Ptr:
- isErrField = t.Elem().Kind() == reflect.Func
}
}
if isErrField {
@@ -212,53 +218,49 @@ func (entry Entry) HasCaller() (has bool) {
entry.Caller != nil
}
-// This function is not declared with a pointer value because otherwise
-// race conditions will occur when using multiple goroutines
-func (entry Entry) log(level Level, msg string) {
+func (entry *Entry) log(level Level, msg string) {
var buffer *bytes.Buffer
- // Default to now, but allow users to override if they want.
- //
- // We don't have to worry about polluting future calls to Entry#log()
- // with this assignment because this function is declared with a
- // non-pointer receiver.
- if entry.Time.IsZero() {
- entry.Time = time.Now()
+ newEntry := entry.Dup()
+
+ if newEntry.Time.IsZero() {
+ newEntry.Time = time.Now()
}
- entry.Level = level
- entry.Message = msg
- entry.Logger.mu.Lock()
- if entry.Logger.ReportCaller {
- entry.Caller = getCaller()
+ newEntry.Level = level
+ newEntry.Message = msg
+
+ newEntry.Logger.mu.Lock()
+ reportCaller := newEntry.Logger.ReportCaller
+ newEntry.Logger.mu.Unlock()
+
+ if reportCaller {
+ newEntry.Caller = getCaller()
}
- entry.Logger.mu.Unlock()
- entry.fireHooks()
+ newEntry.fireHooks()
buffer = getBuffer()
defer func() {
- entry.Buffer = nil
+ newEntry.Buffer = nil
putBuffer(buffer)
}()
buffer.Reset()
- entry.Buffer = buffer
+ newEntry.Buffer = buffer
- entry.write()
+ newEntry.write()
- entry.Buffer = nil
+ newEntry.Buffer = nil
// To avoid Entry#log() returning a value that only would make sense for
// panic() to use in Entry#Panic(), we avoid the allocation by checking
// directly here.
if level <= PanicLevel {
- panic(&entry)
+ panic(newEntry)
}
}
func (entry *Entry) fireHooks() {
- entry.Logger.mu.Lock()
- defer entry.Logger.mu.Unlock()
err := entry.Logger.Hooks.Fire(entry.Level, entry)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
@@ -266,14 +268,14 @@ func (entry *Entry) fireHooks() {
}
func (entry *Entry) write() {
- entry.Logger.mu.Lock()
- defer entry.Logger.mu.Unlock()
serialized, err := entry.Logger.Formatter.Format(entry)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
return
}
- if _, err = entry.Logger.Out.Write(serialized); err != nil {
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+ if _, err := entry.Logger.Out.Write(serialized); err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
}
@@ -319,7 +321,6 @@ func (entry *Entry) Fatal(args ...interface{}) {
func (entry *Entry) Panic(args ...interface{}) {
entry.Log(PanicLevel, args...)
- panic(fmt.Sprint(args...))
}
// Entry Printf family functions
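
The log() rewrite above operates on a Dup()'d entry instead of a value receiver, which is what makes a shared entry safe to log from several goroutines; the following is only an illustrative sketch of that usage pattern, not code from this change:

package main

import (
	"sync"

	"github.com/sirupsen/logrus"
)

func main() {
	entry := logrus.WithField("component", "demo")
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			// Each call logs through a duplicated entry, so the shared
			// fields are not mutated concurrently.
			entry.WithField("n", n).Info("hello")
		}(i)
	}
	wg.Wait()
}
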
diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod
index b3919d5ea..37004ff34 100644
--- a/vendor/github.com/sirupsen/logrus/go.mod
+++ b/vendor/github.com/sirupsen/logrus/go.mod
@@ -2,6 +2,7 @@ module github.com/sirupsen/logrus
require (
github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/magefile/mage v1.10.0
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/stretchr/testify v1.2.2
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037
diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum
index 1edc143be..bce26a188 100644
--- a/vendor/github.com/sirupsen/logrus/go.sum
+++ b/vendor/github.com/sirupsen/logrus/go.sum
@@ -1,5 +1,7 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/magefile/mage v1.10.0 h1:3HiXzCUY12kh9bIuyXShaVe529fJfyqoVM42o/uom2g=
+github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
index ba7f23711..afaf0fc8a 100644
--- a/vendor/github.com/sirupsen/logrus/json_formatter.go
+++ b/vendor/github.com/sirupsen/logrus/json_formatter.go
@@ -118,7 +118,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
encoder.SetIndent("", " ")
}
if err := encoder.Encode(data); err != nil {
- return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err)
+ return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err)
}
return b.Bytes(), nil
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
index dbf627c97..337704457 100644
--- a/vendor/github.com/sirupsen/logrus/logger.go
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -12,7 +12,7 @@ import (
// LogFunction For big messages, it can be more efficient to pass a function
// and only call it if the log level is actually enabled rather than
// generating the log message and then checking if the level is enabled
-type LogFunction func()[]interface{}
+type LogFunction func() []interface{}
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
diff --git a/vendor/github.com/sirupsen/logrus/magefile.go b/vendor/github.com/sirupsen/logrus/magefile.go
new file mode 100644
index 000000000..9aa603939
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/magefile.go
@@ -0,0 +1,77 @@
+// +build mage
+
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path"
+
+ "github.com/magefile/mage/mg"
+ "github.com/magefile/mage/sh"
+)
+
+// getBuildMatrix returns the build matrix from the current version of the go compiler
+func getBuildMatrix() (map[string][]string, error) {
+ jsonData, err := sh.Output("go", "tool", "dist", "list", "-json")
+ if err != nil {
+ return nil, err
+ }
+ var data []struct {
+ Goos string
+ Goarch string
+ }
+ if err := json.Unmarshal([]byte(jsonData), &data); err != nil {
+ return nil, err
+ }
+
+ matrix := map[string][]string{}
+ for _, v := range data {
+ if val, ok := matrix[v.Goos]; ok {
+ matrix[v.Goos] = append(val, v.Goarch)
+ } else {
+ matrix[v.Goos] = []string{v.Goarch}
+ }
+ }
+
+ return matrix, nil
+}
+
+func CrossBuild() error {
+ matrix, err := getBuildMatrix()
+ if err != nil {
+ return err
+ }
+
+ for os, arches := range matrix {
+ for _, arch := range arches {
+ env := map[string]string{
+ "GOOS": os,
+ "GOARCH": arch,
+ }
+ if mg.Verbose() {
+ fmt.Printf("Building for GOOS=%s GOARCH=%s\n", os, arch)
+ }
+ if err := sh.RunWith(env, "go", "build", "./..."); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func Lint() error {
+ gopath := os.Getenv("GOPATH")
+ if gopath == "" {
+ return fmt.Errorf("cannot retrieve GOPATH")
+ }
+
+ return sh.Run(path.Join(gopath, "bin", "golangci-lint"), "run", "./...")
+}
+
+// Run the test suite
+func Test() error {
+ return sh.RunWith(map[string]string{"GORACE": "halt_on_error=1"},
+ "go", "test", "-race", "-v", "./...")
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
index cc4fe6e31..04748b851 100644
--- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
@@ -1,4 +1,4 @@
-// +build linux aix
+// +build linux aix zos
// +build !js
package logrus
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
index 3c28b54ca..8fc698ad6 100644
--- a/vendor/github.com/sirupsen/logrus/text_formatter.go
+++ b/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -235,6 +235,8 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
levelColor = yellow
case ErrorLevel, FatalLevel, PanicLevel:
levelColor = red
+ case InfoLevel:
+ levelColor = blue
default:
levelColor = blue
}
diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml
new file mode 100644
index 000000000..0d6e61793
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.golangci.yml
@@ -0,0 +1,48 @@
+run:
+ deadline: 5m
+
+linters:
+ disable-all: true
+ enable:
+ #- bodyclose
+ - deadcode
+ #- depguard
+ #- dogsled
+ #- dupl
+ - errcheck
+ #- exhaustive
+ #- funlen
+ - gas
+ #- gochecknoinits
+ - goconst
+ #- gocritic
+ #- gocyclo
+ #- gofmt
+ - goimports
+ - golint
+ #- gomnd
+ #- goprintffuncname
+ #- gosec
+ #- gosimple
+ - govet
+ - ineffassign
+ - interfacer
+ #- lll
+ - maligned
+ - megacheck
+ #- misspell
+ #- nakedret
+ #- noctx
+ #- nolintlint
+ #- rowserrcheck
+ #- scopelint
+ #- staticcheck
+ - structcheck
+ #- stylecheck
+ #- typecheck
+ - unconvert
+ #- unparam
+ #- unused
+ - varcheck
+ #- whitespace
+ fast: false
diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml
index a9bd4e547..e0a3b5004 100644
--- a/vendor/github.com/spf13/cobra/.travis.yml
+++ b/vendor/github.com/spf13/cobra/.travis.yml
@@ -1,7 +1,6 @@
language: go
stages:
- - diff
- test
- build
@@ -10,20 +9,20 @@ go:
- 1.13.x
- tip
+env: GO111MODULE=on
+
before_install:
- go get -u github.com/kyoh86/richgo
- go get -u github.com/mitchellh/gox
+ - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin latest
matrix:
allow_failures:
- go: tip
include:
- - stage: diff
- go: 1.13.x
- script: make fmt
- stage: build
go: 1.13.x
script: make cobra_generator
-script:
+script:
- make test
diff --git a/vendor/github.com/spf13/cobra/CHANGELOG.md b/vendor/github.com/spf13/cobra/CHANGELOG.md
index 742d6d6e2..8a23b4f85 100644
--- a/vendor/github.com/spf13/cobra/CHANGELOG.md
+++ b/vendor/github.com/spf13/cobra/CHANGELOG.md
@@ -1,11 +1,40 @@
# Cobra Changelog
-## Pending
-* Fix man page doc generation - no auto generated tag when `cmd.DisableAutoGenTag = true` @jpmcb
+## v1.1.3
+
+* **Fix:** release-branch.cobra1.1 only: Revert "Deprecate Go < 1.14" to maintain backward compatibility
+
+## v1.1.2
+
+### Notable Changes
+
+* Bump license year to 2021 in golden files (#1309) @Bowbaq
+* Enhance PowerShell completion with custom comp (#1208) @Luap99
+* Update gopkg.in/yaml.v2 to v2.4.0: The previous breaking change in yaml.v2 v2.3.0 has been reverted, see go-yaml/yaml#670
+* Documentation readability improvements (#1228 etc.) @zaataylor etc.
+* Use golangci-lint: Repair warnings and errors resulting from linting (#1044) @umarcor
+
+## v1.1.1
+
+* **Fix:** yaml.v2 2.3.0 contained an unintended breaking change. This release reverts to yaml.v2 v2.2.8 which has recent critical CVE fixes, but does not have the breaking changes. See https://github.com/spf13/cobra/pull/1259 for context.
+* **Fix:** correct internal formatting for go-md2man v2 (which caused man page generation to be broken). See https://github.com/spf13/cobra/issues/1049 for context.
+
+## v1.1.0
+
+### Notable Changes
+
+* Extend Go completions and revamp zsh comp (#1070)
+* Fix man page doc generation - no auto generated tag when `cmd.DisableAutoGenTag = true` (#1104) @jpmcb
+* Add completion for help command (#1136)
+* Complete subcommands when TraverseChildren is set (#1171)
+* Fix stderr printing functions (#894)
+* fix: fish output redirection (#1247)
## v1.0.0
+
Announcing v1.0.0 of Cobra. 🎉
-**Notable Changes**
+
+### Notable Changes
* Fish completion (including support for Go custom completion) @marckhouzam
* API (urgent): Rename BashCompDirectives to ShellCompDirectives @marckhouzam
* Remove/replace SetOutput on Command - deprecated @jpmcb
diff --git a/vendor/github.com/spf13/cobra/CONDUCT.md b/vendor/github.com/spf13/cobra/CONDUCT.md
new file mode 100644
index 000000000..9d16f88fd
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/CONDUCT.md
@@ -0,0 +1,37 @@
+## Cobra User Contract
+
+### Versioning
+Cobra will follow a steady release cadence. Non breaking changes will be released as minor versions quarterly. Patch bug releases are at the discretion of the maintainers. Users can expect security patch fixes to be released within relatively short order of a CVE becoming known. For more information on security patch fixes see the CVE section below. Releases will follow [Semantic Versioning](https://semver.org/). Users tracking the Master branch should expect unpredictable breaking changes as the project continues to move forward. For stability, it is highly recommended to use a release.
+
+### Backward Compatibility
+We will maintain two major releases in a moving window. The N-1 release will only receive bug fixes and security updates and will be dropped once N+1 is released.
+
+### Deprecation
+Deprecation of Go versions or dependent packages will only occur in major releases. To reduce the chance of this taking users by surprise, any large deprecation will be preceded by an announcement in the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) and an Issue on Github.
+
+### CVE
+Maintainers will make every effort to release security patches in the case of a medium to high severity CVE directly impacting the library. The speed in which these patches reach a release is up to the discretion of the maintainers. A low severity CVE may be a lower priority than a high severity one.
+
+### Communication
+Cobra maintainers will use GitHub issues and the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) as the primary means of communication with the community. This is to foster open communication with all users and contributors.
+
+### Breaking Changes
+Breaking changes are generally allowed in the master branch, as this is the branch used to develop the next release of Cobra.
+
+There may be times, however, when master is closed for breaking changes. This is likely to happen as we near the release of a new version.
+
+Breaking changes are not allowed in release branches, as these represent minor versions that have already been released. These versions have consumers who expect the APIs, behaviors, etc., to remain stable during the lifetime of the patch stream for the minor release.
+
+Examples of breaking changes include:
+- Removing or renaming an exported constant, variable, type, or function.
+- Updating the version of critical libraries such as `spf13/pflag`, `spf13/viper`, etc.
+ - Some version updates may be acceptable for picking up bug fixes, but maintainers must exercise caution when reviewing.
+
+There may, at times, be exceptions where breaking changes are allowed in release branches. These are at the discretion of the project's maintainers, and must be carefully considered before merging.
+
+### CI Testing
+Maintainers will ensure the Cobra test suite utilizes the currently supported versions of Go.
+
+### Disclaimer
+Changes to this document and the contents therein are at the discretion of the maintainers.
+None of the contents of this document are legally binding in any way to the maintainers or the users.
diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile
index e9740d1e1..472c73bf1 100644
--- a/vendor/github.com/spf13/cobra/Makefile
+++ b/vendor/github.com/spf13/cobra/Makefile
@@ -1,21 +1,29 @@
BIN="./bin"
SRC=$(shell find . -name "*.go")
+ifeq (, $(shell which golangci-lint))
+$(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh")
+endif
+
ifeq (, $(shell which richgo))
$(warning "could not find richgo in $(PATH), run: go get github.com/kyoh86/richgo")
endif
-.PHONY: fmt vet test cobra_generator install_deps clean
+.PHONY: fmt lint test cobra_generator install_deps clean
default: all
-all: fmt vet test cobra_generator
+all: fmt test cobra_generator
fmt:
$(info ******************** checking formatting ********************)
@test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1)
-test: install_deps vet
+lint:
+ $(info ******************** running lint tools ********************)
+ golangci-lint run -v
+
+test: install_deps lint
$(info ******************** running tests ********************)
richgo test -v ./...
@@ -28,9 +36,5 @@ install_deps:
$(info ******************** downloading dependencies ********************)
go get -v ./...
-vet:
- $(info ******************** vetting ********************)
- go vet ./...
-
clean:
rm -rf $(BIN)
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
index 3cf1b25d8..a1b13ddda 100644
--- a/vendor/github.com/spf13/cobra/README.md
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -6,6 +6,7 @@ Cobra is used in many Go projects such as [Kubernetes](http://kubernetes.io/),
[Hugo](https://gohugo.io), and [Github CLI](https://github.com/cli/cli) to
name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra.
+[![](https://img.shields.io/github/workflow/status/spf13/cobra/Test?longCache=tru&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest)
[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra)
[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra)
[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra)
@@ -62,8 +63,8 @@ Cobra is built on a structure of commands, arguments & flags.
**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions.
-The best applications will read like sentences when used. Users will know how
-to use the application because they will natively understand how to use it.
+The best applications read like sentences when used, and as a result, users
+intuitively know how to interact with them.
The pattern to follow is
`APPNAME VERB NOUN --ADJECTIVE.`
@@ -234,11 +235,6 @@ func init() {
rootCmd.AddCommand(initCmd)
}
-func er(msg interface{}) {
- fmt.Println("Error:", msg)
- os.Exit(1)
-}
-
func initConfig() {
if cfgFile != "" {
// Use config file from the flag.
@@ -246,9 +242,7 @@ func initConfig() {
} else {
// Find home directory.
home, err := homedir.Dir()
- if err != nil {
- er(err)
- }
+ cobra.CheckErr(err)
// Search config in home directory with name ".cobra" (without extension).
viper.AddConfigPath(home)
@@ -268,7 +262,7 @@ func initConfig() {
With the root command you need to have your main function execute it.
Execute should be run on the root for clarity, though it can be called on any command.
-In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra.
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra.
```go
package main
@@ -363,7 +357,7 @@ There are two different approaches to assign a flag.
### Persistent Flags
-A flag can be 'persistent' meaning that this flag will be available to the
+A flag can be 'persistent', meaning that this flag will be available to the
command it's assigned to as well as every command under that command. For
global flags, assign a flag as a persistent flag on the root.
@@ -373,7 +367,7 @@ rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose out
### Local Flags
-A flag can also be assigned locally which will only apply to that specific command.
+A flag can also be assigned locally, which will only apply to that specific command.
```go
localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
@@ -381,8 +375,8 @@ localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to rea
### Local Flag on Parent Commands
-By default Cobra only parses local flags on the target command, any local flags on
-parent commands are ignored. By enabling `Command.TraverseChildren` Cobra will
+By default, Cobra only parses local flags on the target command, and any local flags on
+parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will
parse local flags on each command before executing the target command.
```go
@@ -404,8 +398,8 @@ func init() {
}
```
-In this example the persistent flag `author` is bound with `viper`.
-**Note**, that the variable `author` will not be set to the value from config,
+In this example, the persistent flag `author` is bound with `viper`.
+**Note**: the variable `author` will not be set to the value from config
when the `--author` flag is not provided by the user.
More in [viper documentation](https://github.com/spf13/viper#working-with-flags).
@@ -465,7 +459,7 @@ var cmd = &cobra.Command{
In the example below, we have defined three commands. Two are at the top level
and one (cmdTimes) is a child of one of the top commands. In this case the root
-is not executable meaning that a subcommand is required. This is accomplished
+is not executable, meaning that a subcommand is required. This is accomplished
by not providing a 'Run' for the 'rootCmd'.
We have only defined one flag for a single command.
@@ -759,7 +753,7 @@ Cobra can generate documentation based on subcommands, flags, etc. Read more abo
## Generating shell completions
-Cobra can generate a shell-completion file for the following shells: Bash, Zsh, Fish, Powershell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md).
+Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md).
# License
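The README hunk above swaps the example's local `er()` helper for the new `cobra.CheckErr`. Below is a minimal, self-contained sketch of that pattern; the program name, command, and output are hypothetical and not taken from the diff.

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// rootCmd is a hypothetical root command used only for illustration.
var rootCmd = &cobra.Command{
	Use:   "example",
	Short: "Demonstrates the cobra.CheckErr helper",
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println("hello from example")
	},
}

func main() {
	// CheckErr prints "Error: <msg>" to stderr and exits with code 1 on a non-nil
	// value, replacing the hand-rolled er() helper removed from the README example.
	cobra.CheckErr(rootCmd.Execute())
}
```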
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
index 846636d75..710614793 100644
--- a/vendor/github.com/spf13/cobra/bash_completions.go
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -19,9 +19,9 @@ const (
BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir"
)
-func writePreamble(buf *bytes.Buffer, name string) {
- buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
- buf.WriteString(fmt.Sprintf(`
+func writePreamble(buf io.StringWriter, name string) {
+ WriteStringAndCheck(buf, fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`
__%[1]s_debug()
{
if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
@@ -380,10 +380,10 @@ __%[1]s_handle_word()
ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
}
-func writePostscript(buf *bytes.Buffer, name string) {
+func writePostscript(buf io.StringWriter, name string) {
name = strings.Replace(name, ":", "__", -1)
- buf.WriteString(fmt.Sprintf("__start_%s()\n", name))
- buf.WriteString(fmt.Sprintf(`{
+ WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`{
local cur prev words cword
declare -A flaghash 2>/dev/null || :
declare -A aliashash 2>/dev/null || :
@@ -410,33 +410,33 @@ func writePostscript(buf *bytes.Buffer, name string) {
}
`, name))
- buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then
+ WriteStringAndCheck(buf, fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then
complete -o default -F __start_%s %s
else
complete -o default -o nospace -F __start_%s %s
fi
`, name, name, name, name))
- buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n")
+ WriteStringAndCheck(buf, "# ex: ts=4 sw=4 et filetype=sh\n")
}
-func writeCommands(buf *bytes.Buffer, cmd *Command) {
- buf.WriteString(" commands=()\n")
+func writeCommands(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " commands=()\n")
for _, c := range cmd.Commands() {
if !c.IsAvailableCommand() && c != cmd.helpCommand {
continue
}
- buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name()))
+ WriteStringAndCheck(buf, fmt.Sprintf(" commands+=(%q)\n", c.Name()))
writeCmdAliases(buf, c)
}
- buf.WriteString("\n")
+ WriteStringAndCheck(buf, "\n")
}
-func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) {
+func writeFlagHandler(buf io.StringWriter, name string, annotations map[string][]string, cmd *Command) {
for key, value := range annotations {
switch key {
case BashCompFilenameExt:
- buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
var ext string
if len(value) > 0 {
@@ -444,17 +444,18 @@ func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]s
} else {
ext = "_filedir"
}
- buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext))
case BashCompCustom:
- buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+
if len(value) > 0 {
handlers := strings.Join(value, "; ")
- buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", handlers))
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", handlers))
} else {
- buf.WriteString(" flags_completion+=(:)\n")
+ WriteStringAndCheck(buf, " flags_completion+=(:)\n")
}
case BashCompSubdirsInDir:
- buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
var ext string
if len(value) == 1 {
@@ -462,46 +463,48 @@ func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]s
} else {
ext = "_filedir -d"
}
- buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
+ WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext))
}
}
}
-func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
+const cbn = "\")\n"
+
+func writeShortFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) {
name := flag.Shorthand
format := " "
if len(flag.NoOptDefVal) == 0 {
format += "two_word_"
}
- format += "flags+=(\"-%s\")\n"
- buf.WriteString(fmt.Sprintf(format, name))
+ format += "flags+=(\"-%s" + cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
writeFlagHandler(buf, "-"+name, flag.Annotations, cmd)
}
-func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
+func writeFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) {
name := flag.Name
format := " flags+=(\"--%s"
if len(flag.NoOptDefVal) == 0 {
format += "="
}
- format += "\")\n"
- buf.WriteString(fmt.Sprintf(format, name))
+ format += cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
if len(flag.NoOptDefVal) == 0 {
- format = " two_word_flags+=(\"--%s\")\n"
- buf.WriteString(fmt.Sprintf(format, name))
+ format = " two_word_flags+=(\"--%s" + cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
}
writeFlagHandler(buf, "--"+name, flag.Annotations, cmd)
}
-func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) {
+func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) {
name := flag.Name
- format := " local_nonpersistent_flags+=(\"--%[1]s\")\n"
+ format := " local_nonpersistent_flags+=(\"--%[1]s" + cbn
if len(flag.NoOptDefVal) == 0 {
- format += " local_nonpersistent_flags+=(\"--%[1]s=\")\n"
+ format += " local_nonpersistent_flags+=(\"--%[1]s=" + cbn
}
- buf.WriteString(fmt.Sprintf(format, name))
+ WriteStringAndCheck(buf, fmt.Sprintf(format, name))
if len(flag.Shorthand) > 0 {
- buf.WriteString(fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand))
+ WriteStringAndCheck(buf, fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand))
}
}
@@ -519,9 +522,9 @@ func prepareCustomAnnotationsForFlags(cmd *Command) {
}
}
-func writeFlags(buf *bytes.Buffer, cmd *Command) {
+func writeFlags(buf io.StringWriter, cmd *Command) {
prepareCustomAnnotationsForFlags(cmd)
- buf.WriteString(` flags=()
+ WriteStringAndCheck(buf, ` flags=()
two_word_flags=()
local_nonpersistent_flags=()
flags_with_completion=()
@@ -553,11 +556,11 @@ func writeFlags(buf *bytes.Buffer, cmd *Command) {
}
})
- buf.WriteString("\n")
+ WriteStringAndCheck(buf, "\n")
}
-func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) {
- buf.WriteString(" must_have_one_flag=()\n")
+func writeRequiredFlag(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " must_have_one_flag=()\n")
flags := cmd.NonInheritedFlags()
flags.VisitAll(func(flag *pflag.Flag) {
if nonCompletableFlag(flag) {
@@ -570,55 +573,55 @@ func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) {
if flag.Value.Type() != "bool" {
format += "="
}
- format += "\")\n"
- buf.WriteString(fmt.Sprintf(format, flag.Name))
+ format += cbn
+ WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name))
if len(flag.Shorthand) > 0 {
- buf.WriteString(fmt.Sprintf(" must_have_one_flag+=(\"-%s\")\n", flag.Shorthand))
+ WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand))
}
}
}
})
}
-func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) {
- buf.WriteString(" must_have_one_noun=()\n")
- sort.Sort(sort.StringSlice(cmd.ValidArgs))
+func writeRequiredNouns(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " must_have_one_noun=()\n")
+ sort.Strings(cmd.ValidArgs)
for _, value := range cmd.ValidArgs {
// Remove any description that may be included following a tab character.
// Descriptions are not supported by bash completion.
value = strings.Split(value, "\t")[0]
- buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value))
+ WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value))
}
if cmd.ValidArgsFunction != nil {
- buf.WriteString(" has_completion_function=1\n")
+ WriteStringAndCheck(buf, " has_completion_function=1\n")
}
}
-func writeCmdAliases(buf *bytes.Buffer, cmd *Command) {
+func writeCmdAliases(buf io.StringWriter, cmd *Command) {
if len(cmd.Aliases) == 0 {
return
}
- sort.Sort(sort.StringSlice(cmd.Aliases))
+ sort.Strings(cmd.Aliases)
- buf.WriteString(fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n"))
+ WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n"))
for _, value := range cmd.Aliases {
- buf.WriteString(fmt.Sprintf(" command_aliases+=(%q)\n", value))
- buf.WriteString(fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name()))
+ WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value))
+ WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name()))
}
- buf.WriteString(` fi`)
- buf.WriteString("\n")
+ WriteStringAndCheck(buf, ` fi`)
+ WriteStringAndCheck(buf, "\n")
}
-func writeArgAliases(buf *bytes.Buffer, cmd *Command) {
- buf.WriteString(" noun_aliases=()\n")
- sort.Sort(sort.StringSlice(cmd.ArgAliases))
+func writeArgAliases(buf io.StringWriter, cmd *Command) {
+ WriteStringAndCheck(buf, " noun_aliases=()\n")
+ sort.Strings(cmd.ArgAliases)
for _, value := range cmd.ArgAliases {
- buf.WriteString(fmt.Sprintf(" noun_aliases+=(%q)\n", value))
+ WriteStringAndCheck(buf, fmt.Sprintf(" noun_aliases+=(%q)\n", value))
}
}
-func gen(buf *bytes.Buffer, cmd *Command) {
+func gen(buf io.StringWriter, cmd *Command) {
for _, c := range cmd.Commands() {
if !c.IsAvailableCommand() && c != cmd.helpCommand {
continue
@@ -630,22 +633,22 @@ func gen(buf *bytes.Buffer, cmd *Command) {
commandName = strings.Replace(commandName, ":", "__", -1)
if cmd.Root() == cmd {
- buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName))
+ WriteStringAndCheck(buf, fmt.Sprintf("_%s_root_command()\n{\n", commandName))
} else {
- buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName))
+ WriteStringAndCheck(buf, fmt.Sprintf("_%s()\n{\n", commandName))
}
- buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName))
- buf.WriteString("\n")
- buf.WriteString(" command_aliases=()\n")
- buf.WriteString("\n")
+ WriteStringAndCheck(buf, fmt.Sprintf(" last_command=%q\n", commandName))
+ WriteStringAndCheck(buf, "\n")
+ WriteStringAndCheck(buf, " command_aliases=()\n")
+ WriteStringAndCheck(buf, "\n")
writeCommands(buf, cmd)
writeFlags(buf, cmd)
writeRequiredFlag(buf, cmd)
writeRequiredNouns(buf, cmd)
writeArgAliases(buf, cmd)
- buf.WriteString("}\n\n")
+ WriteStringAndCheck(buf, "}\n\n")
}
// GenBashCompletion generates bash completion file and writes to the passed writer.
diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md
index a82d5bb8b..130f99b92 100644
--- a/vendor/github.com/spf13/cobra/bash_completions.md
+++ b/vendor/github.com/spf13/cobra/bash_completions.md
@@ -4,7 +4,7 @@ Please refer to [Shell Completions](shell_completions.md) for details.
## Bash legacy dynamic completions
-For backwards-compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used along-side `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution.
+For backward compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used alongside `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution.
The legacy solution allows you to inject bash functions into the bash completion script. Those bash functions are responsible for providing the completion choices for your own completions.
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
index d01becc8f..d6cbfd719 100644
--- a/vendor/github.com/spf13/cobra/cobra.go
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -19,6 +19,7 @@ package cobra
import (
"fmt"
"io"
+ "os"
"reflect"
"strconv"
"strings"
@@ -205,3 +206,17 @@ func stringInSlice(a string, list []string) bool {
}
return false
}
+
+// CheckErr prints the msg with the prefix 'Error:' and exits with error code 1. If the msg is nil, it does nothing.
+func CheckErr(msg interface{}) {
+ if msg != nil {
+ fmt.Fprintln(os.Stderr, "Error:", msg)
+ os.Exit(1)
+ }
+}
+
+// WriteStringAndCheck writes a string into a buffer, and checks if the error is not nil.
+func WriteStringAndCheck(b io.StringWriter, s string) {
+ _, err := b.WriteString(s)
+ CheckErr(err)
+}
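The hunk above introduces the two helpers used throughout the completion rewrite: `CheckErr` exits with code 1 on any non-nil value, and `WriteStringAndCheck` funnels `WriteString` errors into `CheckErr`. A minimal sketch of how they compose, assuming a throwaway `bytes.Buffer` and a made-up flag name:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	// *bytes.Buffer satisfies io.StringWriter, so it can be passed to
	// WriteStringAndCheck just as the completion generators in this diff do.
	buf := new(bytes.Buffer)
	cobra.WriteStringAndCheck(buf, "# generated header\n")
	cobra.WriteStringAndCheck(buf, fmt.Sprintf("flags+=(%q)\n", "--verbose"))

	// CheckErr is a no-op for nil, so this only terminates the program on a real error.
	cobra.CheckErr(nil)

	fmt.Print(buf.String())
}
```

Accepting an `io.StringWriter` instead of a concrete `*bytes.Buffer` is what lets the generators share a single error path while remaining testable against any writer.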
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
index 77b399e02..d6732ad11 100644
--- a/vendor/github.com/spf13/cobra/command.go
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -84,9 +84,6 @@ type Command struct {
// Deprecated defines, if this command is deprecated and should print this string when used.
Deprecated string
- // Hidden defines, if this command is hidden and should NOT show up in the list of available commands.
- Hidden bool
-
// Annotations are key/value pairs that can be used by applications to identify or
// group commands.
Annotations map[string]string
@@ -126,55 +123,6 @@ type Command struct {
// PersistentPostRunE: PersistentPostRun but returns an error.
PersistentPostRunE func(cmd *Command, args []string) error
- // SilenceErrors is an option to quiet errors down stream.
- SilenceErrors bool
-
- // SilenceUsage is an option to silence usage when an error occurs.
- SilenceUsage bool
-
- // DisableFlagParsing disables the flag parsing.
- // If this is true all flags will be passed to the command as arguments.
- DisableFlagParsing bool
-
- // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...")
- // will be printed by generating docs for this command.
- DisableAutoGenTag bool
-
- // DisableFlagsInUseLine will disable the addition of [flags] to the usage
- // line of a command when printing help or generating docs
- DisableFlagsInUseLine bool
-
- // DisableSuggestions disables the suggestions based on Levenshtein distance
- // that go along with 'unknown command' messages.
- DisableSuggestions bool
- // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions.
- // Must be > 0.
- SuggestionsMinimumDistance int
-
- // TraverseChildren parses flags on all parents before executing child command.
- TraverseChildren bool
-
- // FParseErrWhitelist flag parse errors to be ignored
- FParseErrWhitelist FParseErrWhitelist
-
- ctx context.Context
-
- // commands is the list of commands supported by this program.
- commands []*Command
- // parent is a parent command for this command.
- parent *Command
- // Max lengths of commands' string lengths for use in padding.
- commandsMaxUseLen int
- commandsMaxCommandPathLen int
- commandsMaxNameLen int
- // commandsAreSorted defines, if command slice are sorted or not.
- commandsAreSorted bool
- // commandCalledAs is the name or alias value used to call this command.
- commandCalledAs struct {
- name string
- called bool
- }
-
// args is actual args parsed from flags.
args []string
// flagErrorBuf contains all error messages from pflag.
@@ -216,6 +164,60 @@ type Command struct {
outWriter io.Writer
// errWriter is a writer defined by the user that replaces stderr
errWriter io.Writer
+
+ //FParseErrWhitelist flag parse errors to be ignored
+ FParseErrWhitelist FParseErrWhitelist
+
+ // commandsAreSorted defines, if command slice are sorted or not.
+ commandsAreSorted bool
+ // commandCalledAs is the name or alias value used to call this command.
+ commandCalledAs struct {
+ name string
+ called bool
+ }
+
+ ctx context.Context
+
+ // commands is the list of commands supported by this program.
+ commands []*Command
+ // parent is a parent command for this command.
+ parent *Command
+ // Max lengths of commands' string lengths for use in padding.
+ commandsMaxUseLen int
+ commandsMaxCommandPathLen int
+ commandsMaxNameLen int
+
+ // TraverseChildren parses flags on all parents before executing child command.
+ TraverseChildren bool
+
+ // Hidden defines, if this command is hidden and should NOT show up in the list of available commands.
+ Hidden bool
+
+ // SilenceErrors is an option to quiet errors down stream.
+ SilenceErrors bool
+
+ // SilenceUsage is an option to silence usage when an error occurs.
+ SilenceUsage bool
+
+ // DisableFlagParsing disables the flag parsing.
+ // If this is true all flags will be passed to the command as arguments.
+ DisableFlagParsing bool
+
+ // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...")
+ // will be printed by generating docs for this command.
+ DisableAutoGenTag bool
+
+ // DisableFlagsInUseLine will disable the addition of [flags] to the usage
+ // line of a command when printing help or generating docs
+ DisableFlagsInUseLine bool
+
+ // DisableSuggestions disables the suggestions based on Levenshtein distance
+ // that go along with 'unknown command' messages.
+ DisableSuggestions bool
+
+ // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions.
+ // Must be > 0.
+ SuggestionsMinimumDistance int
}
// Context returns underlying command context. If command wasn't
@@ -418,7 +420,7 @@ func (c *Command) UsageString() string {
c.outWriter = bb
c.errWriter = bb
- c.Usage()
+ CheckErr(c.Usage())
// Setting things back to normal
c.outWriter = tmpOutput
@@ -964,13 +966,13 @@ func (c *Command) ExecuteC() (cmd *Command, err error) {
return cmd, nil
}
- // If root command has SilentErrors flagged,
+ // If root command has SilenceErrors flagged,
// all subcommands should respect it
if !cmd.SilenceErrors && !c.SilenceErrors {
c.PrintErrln("Error:", err.Error())
}
- // If root command has SilentUsage flagged,
+ // If root command has SilenceUsage flagged,
// all subcommands should respect it
if !cmd.SilenceUsage && !c.SilenceUsage {
c.Println(cmd.UsageString())
@@ -1087,10 +1089,10 @@ Simply type ` + c.Name() + ` help [path to command] for full details.`,
cmd, _, e := c.Root().Find(args)
if cmd == nil || e != nil {
c.Printf("Unknown help topic %#q\n", args)
- c.Root().Usage()
+ CheckErr(c.Root().Usage())
} else {
cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown
- cmd.Help()
+ CheckErr(cmd.Help())
}
},
}
diff --git a/vendor/github.com/spf13/cobra/custom_completions.go b/vendor/github.com/spf13/cobra/custom_completions.go
index f9e88e081..fa060c147 100644
--- a/vendor/github.com/spf13/cobra/custom_completions.go
+++ b/vendor/github.com/spf13/cobra/custom_completions.go
@@ -527,13 +527,13 @@ func CompDebug(msg string, printToStdErr bool) {
os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err == nil {
defer f.Close()
- f.WriteString(msg)
+ WriteStringAndCheck(f, msg)
}
}
if printToStdErr {
// Must print to stderr for this not to be read by the completion script.
- fmt.Fprintf(os.Stderr, msg)
+ fmt.Fprint(os.Stderr, msg)
}
}
diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go
index eaae9bca8..3e112347d 100644
--- a/vendor/github.com/spf13/cobra/fish_completions.go
+++ b/vendor/github.com/spf13/cobra/fish_completions.go
@@ -8,7 +8,7 @@ import (
"strings"
)
-func genFishComp(buf *bytes.Buffer, name string, includeDesc bool) {
+func genFishComp(buf io.StringWriter, name string, includeDesc bool) {
// Variables should not contain a '-' or ':' character
nameForVar := name
nameForVar = strings.Replace(nameForVar, "-", "_", -1)
@@ -18,8 +18,8 @@ func genFishComp(buf *bytes.Buffer, name string, includeDesc bool) {
if !includeDesc {
compCmd = ShellCompNoDescRequestCmd
}
- buf.WriteString(fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name))
- buf.WriteString(fmt.Sprintf(`
+ WriteStringAndCheck(buf, fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name))
+ WriteStringAndCheck(buf, fmt.Sprintf(`
function __%[1]s_debug
set file "$BASH_COMP_DEBUG_FILE"
if test -n "$file"
diff --git a/vendor/github.com/spf13/cobra/go.mod b/vendor/github.com/spf13/cobra/go.mod
index 57e3244d5..ff5614405 100644
--- a/vendor/github.com/spf13/cobra/go.mod
+++ b/vendor/github.com/spf13/cobra/go.mod
@@ -8,5 +8,5 @@ require (
github.com/mitchellh/go-homedir v1.1.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.7.0
- gopkg.in/yaml.v2 v2.2.8
+ gopkg.in/yaml.v2 v2.4.0
)
diff --git a/vendor/github.com/spf13/cobra/go.sum b/vendor/github.com/spf13/cobra/go.sum
index 0aae73863..9328ee3ee 100644
--- a/vendor/github.com/spf13/cobra/go.sum
+++ b/vendor/github.com/spf13/cobra/go.sum
@@ -304,8 +304,8 @@ gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go
index 756c61b9d..c55be71cd 100644
--- a/vendor/github.com/spf13/cobra/powershell_completions.go
+++ b/vendor/github.com/spf13/cobra/powershell_completions.go
@@ -1,6 +1,3 @@
-// PowerShell completions are based on the amazing work from clap:
-// https://github.com/clap-rs/clap/blob/3294d18efe5f264d12c9035f404c7d189d4824e1/src/completions/powershell.rs
-//
// The generated scripts require PowerShell v5.0+ (which comes with Windows 10, but
can be downloaded separately for Windows 7 or 8.1).
@@ -11,90 +8,278 @@ import (
"fmt"
"io"
"os"
- "strings"
-
- "github.com/spf13/pflag"
)
-var powerShellCompletionTemplate = `using namespace System.Management.Automation
-using namespace System.Management.Automation.Language
-Register-ArgumentCompleter -Native -CommandName '%s' -ScriptBlock {
- param($wordToComplete, $commandAst, $cursorPosition)
- $commandElements = $commandAst.CommandElements
- $command = @(
- '%s'
- for ($i = 1; $i -lt $commandElements.Count; $i++) {
- $element = $commandElements[$i]
- if ($element -isnot [StringConstantExpressionAst] -or
- $element.StringConstantType -ne [StringConstantType]::BareWord -or
- $element.Value.StartsWith('-')) {
- break
- }
- $element.Value
- }
- ) -join ';'
- $completions = @(switch ($command) {%s
- })
- $completions.Where{ $_.CompletionText -like "$wordToComplete*" } |
- Sort-Object -Property ListItemText
-}`
-
-func generatePowerShellSubcommandCases(out io.Writer, cmd *Command, previousCommandName string) {
- var cmdName string
- if previousCommandName == "" {
- cmdName = cmd.Name()
- } else {
- cmdName = fmt.Sprintf("%s;%s", previousCommandName, cmd.Name())
- }
-
- fmt.Fprintf(out, "\n '%s' {", cmdName)
-
- cmd.Flags().VisitAll(func(flag *pflag.Flag) {
- if nonCompletableFlag(flag) {
- return
- }
- usage := escapeStringForPowerShell(flag.Usage)
- if len(flag.Shorthand) > 0 {
- fmt.Fprintf(out, "\n [CompletionResult]::new('-%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Shorthand, flag.Shorthand, usage)
- }
- fmt.Fprintf(out, "\n [CompletionResult]::new('--%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Name, flag.Name, usage)
- })
-
- for _, subCmd := range cmd.Commands() {
- usage := escapeStringForPowerShell(subCmd.Short)
- fmt.Fprintf(out, "\n [CompletionResult]::new('%s', '%s', [CompletionResultType]::ParameterValue, '%s')", subCmd.Name(), subCmd.Name(), usage)
+func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) {
+ compCmd := ShellCompRequestCmd
+ if !includeDesc {
+ compCmd = ShellCompNoDescRequestCmd
}
+ WriteStringAndCheck(buf, fmt.Sprintf(`# powershell completion for %-36[1]s -*- shell-script -*-
- fmt.Fprint(out, "\n break\n }")
-
- for _, subCmd := range cmd.Commands() {
- generatePowerShellSubcommandCases(out, subCmd, cmdName)
- }
+function __%[1]s_debug {
+ if ($env:BASH_COMP_DEBUG_FILE) {
+ "$args" | Out-File -Append -FilePath "$env:BASH_COMP_DEBUG_FILE"
+ }
}
-func escapeStringForPowerShell(s string) string {
- return strings.Replace(s, "'", "''", -1)
+filter __%[1]s_escapeStringWithSpecialChars {
+`+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+`
}
-// GenPowerShellCompletion generates PowerShell completion file and writes to the passed writer.
-func (c *Command) GenPowerShellCompletion(w io.Writer) error {
- buf := new(bytes.Buffer)
+Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock {
+ param(
+ $WordToComplete,
+ $CommandAst,
+ $CursorPosition
+ )
+
+ # Get the current command line and convert into a string
+ $Command = $CommandAst.CommandElements
+ $Command = "$Command"
+
+ __%[1]s_debug ""
+ __%[1]s_debug "========= starting completion logic =========="
+ __%[1]s_debug "WordToComplete: $WordToComplete Command: $Command CursorPosition: $CursorPosition"
+
+ # The user could have moved the cursor backwards on the command-line.
+ # We need to trigger completion from the $CursorPosition location, so we need
+ # to truncate the command-line ($Command) up to the $CursorPosition location.
+ # Make sure the $Command is longer than the $CursorPosition before we truncate.
+ # This happens because the $Command does not include the last space.
+ if ($Command.Length -gt $CursorPosition) {
+ $Command=$Command.Substring(0,$CursorPosition)
+ }
+ __%[1]s_debug "Truncated command: $Command"
+
+ $ShellCompDirectiveError=%[3]d
+ $ShellCompDirectiveNoSpace=%[4]d
+ $ShellCompDirectiveNoFileComp=%[5]d
+ $ShellCompDirectiveFilterFileExt=%[6]d
+ $ShellCompDirectiveFilterDirs=%[7]d
+
+ # Prepare the command to request completions for the program.
+ # Split the command at the first space to separate the program and arguments.
+ $Program,$Arguments = $Command.Split(" ",2)
+ $RequestComp="$Program %[2]s $Arguments"
+ __%[1]s_debug "RequestComp: $RequestComp"
+
+ # we cannot use $WordToComplete because it
+ # has the wrong values if the cursor was moved
+ # so use the last argument
+ if ($WordToComplete -ne "" ) {
+ $WordToComplete = $Arguments.Split(" ")[-1]
+ }
+ __%[1]s_debug "New WordToComplete: $WordToComplete"
+
+
+ # Check for flag with equal sign
+ $IsEqualFlag = ($WordToComplete -Like "--*=*" )
+ if ( $IsEqualFlag ) {
+ __%[1]s_debug "Completing equal sign flag"
+ # Remove the flag part
+ $Flag,$WordToComplete = $WordToComplete.Split("=",2)
+ }
+
+ if ( $WordToComplete -eq "" -And ( -Not $IsEqualFlag )) {
+ # If the last parameter is complete (there is a space following it)
+ # We add an extra empty parameter so we can indicate this to the go method.
+ __%[1]s_debug "Adding extra empty parameter"
+`+" # We need to use `\"`\" to pass an empty argument a \"\" or '' does not work!!!"+`
+`+" $RequestComp=\"$RequestComp\" + ' `\"`\"' "+`
+ }
+
+ __%[1]s_debug "Calling $RequestComp"
+ # call the command, store the output in $out, and redirect stderr and stdout to null
+ # $Out is an array that contains one line per element
+ Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null
+
+
+ # get directive from last line
+ [int]$Directive = $Out[-1].TrimStart(':')
+ if ($Directive -eq "") {
+ # There is no directive specified
+ $Directive = 0
+ }
+ __%[1]s_debug "The completion directive is: $Directive"
+
+ # remove directive (last element) from out
+ $Out = $Out | Where-Object { $_ -ne $Out[-1] }
+ __%[1]s_debug "The completions are: $Out"
+
+ if (($Directive -band $ShellCompDirectiveError) -ne 0 ) {
+ # Error code. No completion.
+ __%[1]s_debug "Received error from custom completion go code"
+ return
+ }
+
+ $Longest = 0
+ $Values = $Out | ForEach-Object {
+ # Split the output into name and description
+`+" $Name, $Description = $_.Split(\"`t\",2)"+`
+ __%[1]s_debug "Name: $Name Description: $Description"
+
+ # Look for the longest completion so that we can format things nicely
+ if ($Longest -lt $Name.Length) {
+ $Longest = $Name.Length
+ }
+
+ # Set the description to a one space string if there is none set.
+ # This is needed because the CompletionResult does not accept an empty string as argument
+ if (-Not $Description) {
+ $Description = " "
+ }
+ @{Name="$Name";Description="$Description"}
+ }
+
+
+ $Space = " "
+ if (($Directive -band $ShellCompDirectiveNoSpace) -ne 0 ) {
+ # remove the space here
+ __%[1]s_debug "ShellCompDirectiveNoSpace is called"
+ $Space = ""
+ }
+
+ if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) {
+ __%[1]s_debug "ShellCompDirectiveNoFileComp is called"
+
+ if ($Values.Length -eq 0) {
+ # Just print an empty string here so the
+ # shell does not start to complete paths.
+ # We cannot use CompletionResult here because
+ # it does not accept an empty string as argument.
+ ""
+ return
+ }
+ }
+
+ if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or
+ (($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) {
+ __%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported"
+
+ # return here to prevent the completion of the extensions
+ return
+ }
- var subCommandCases bytes.Buffer
- generatePowerShellSubcommandCases(&subCommandCases, c, "")
- fmt.Fprintf(buf, powerShellCompletionTemplate, c.Name(), c.Name(), subCommandCases.String())
+ $Values = $Values | Where-Object {
+ # filter the result
+ $_.Name -like "$WordToComplete*"
+ # Join the flag back if we have an equal sign flag
+ if ( $IsEqualFlag ) {
+ __%[1]s_debug "Join the equal sign flag back to the completion value"
+ $_.Name = $Flag + "=" + $_.Name
+ }
+ }
+
+ # Get the current mode
+ $Mode = (Get-PSReadLineKeyHandler | Where-Object {$_.Key -eq "Tab" }).Function
+ __%[1]s_debug "Mode: $Mode"
+
+ $Values | ForEach-Object {
+
+ # store temporarily because switch will overwrite $_
+ $comp = $_
+
+ # PowerShell supports three different completion modes
+ # - TabCompleteNext (default Windows style - on each key press the next option is displayed)
+ # - Complete (works like bash)
+ # - MenuComplete (works like zsh)
+ # You set the mode with Set-PSReadLineKeyHandler -Key Tab -Function <mode>
+
+ # CompletionResult Arguments:
+ # 1) CompletionText text to be used as the auto completion result
+ # 2) ListItemText text to be displayed in the suggestion list
+ # 3) ResultType type of completion result
+ # 4) ToolTip text for the tooltip with details about the object
+
+ switch ($Mode) {
+
+ # bash like
+ "Complete" {
+
+ if ($Values.Length -eq 1) {
+ __%[1]s_debug "Only one completion left"
+
+ # insert space after value
+ [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+
+ } else {
+ # Add the proper number of spaces to align the descriptions
+ while($comp.Name.Length -lt $Longest) {
+ $comp.Name = $comp.Name + " "
+ }
+
+ # Check for empty description and only add parentheses if needed
+ if ($($comp.Description) -eq " " ) {
+ $Description = ""
+ } else {
+ $Description = " ($($comp.Description))"
+ }
+
+ [System.Management.Automation.CompletionResult]::new("$($comp.Name)$Description", "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)")
+ }
+ }
+
+ # zsh like
+ "MenuComplete" {
+ # insert space after value
+ # MenuComplete will automatically show the ToolTip of
+ # the highlighted value at the bottom of the suggestions.
+ [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+ }
+
+ # TabCompleteNext and in case we get something unknown
+ Default {
+ # Like MenuComplete but we don't want to add a space here because
+ # the user needs to press space anyway to get the completion.
+ # Description will not be shown because that's not possible with TabCompleteNext
+ [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+ }
+ }
+
+ }
+}
+`, name, compCmd,
+ ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
+ ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs))
+}
+
+func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error {
+ buf := new(bytes.Buffer)
+ genPowerShellComp(buf, c.Name(), includeDesc)
_, err := buf.WriteTo(w)
return err
}
-// GenPowerShellCompletionFile generates PowerShell completion file.
-func (c *Command) GenPowerShellCompletionFile(filename string) error {
+func (c *Command) genPowerShellCompletionFile(filename string, includeDesc bool) error {
outFile, err := os.Create(filename)
if err != nil {
return err
}
defer outFile.Close()
- return c.GenPowerShellCompletion(outFile)
+ return c.genPowerShellCompletion(outFile, includeDesc)
+}
+
+// GenPowerShellCompletionFile generates powershell completion file without descriptions.
+func (c *Command) GenPowerShellCompletionFile(filename string) error {
+ return c.genPowerShellCompletionFile(filename, false)
+}
+
+// GenPowerShellCompletion generates powershell completion file without descriptions
+// and writes it to the passed writer.
+func (c *Command) GenPowerShellCompletion(w io.Writer) error {
+ return c.genPowerShellCompletion(w, false)
+}
+
+// GenPowerShellCompletionFileWithDesc generates powershell completion file with descriptions.
+func (c *Command) GenPowerShellCompletionFileWithDesc(filename string) error {
+ return c.genPowerShellCompletionFile(filename, true)
+}
+
+// GenPowerShellCompletionWithDesc generates powershell completion file with descriptions
+// and writes it to the passed writer.
+func (c *Command) GenPowerShellCompletionWithDesc(w io.Writer) error {
+ return c.genPowerShellCompletion(w, true)
}
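The rewritten generator exposes four public entry points: `GenPowerShellCompletion`, `GenPowerShellCompletionFile`, and their `*WithDesc` variants. The sketch below shows how a host application might wire them into a completion subcommand; the root command, the subcommand, and the `--descriptions` flag are illustrative assumptions, and only the `Gen*` methods come from the diff itself.

```go
package main

import (
	"os"

	"github.com/spf13/cobra"
)

// rootCmd and the "completion" subcommand are hypothetical; only the
// GenPowerShellCompletion* methods are the Cobra API introduced above.
var rootCmd = &cobra.Command{Use: "example"}

var completionCmd = &cobra.Command{
	Use:       "completion [powershell]",
	Short:     "Generate a PowerShell completion script",
	Args:      cobra.ExactValidArgs(1),
	ValidArgs: []string{"powershell"},
	RunE: func(cmd *cobra.Command, args []string) error {
		withDesc, err := cmd.Flags().GetBool("descriptions")
		if err != nil {
			return err
		}
		if withDesc {
			// Descriptions show up in the Complete and MenuComplete PSReadLine modes.
			return cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout)
		}
		return cmd.Root().GenPowerShellCompletion(os.Stdout)
	},
}

func main() {
	completionCmd.Flags().Bool("descriptions", true, "include completion descriptions")
	rootCmd.AddCommand(completionCmd)
	cobra.CheckErr(rootCmd.Execute())
}
```

A user would then load the script in a session with something like `example completion powershell | Out-String | Invoke-Expression`, matching the instructions added to shell_completions.md later in this diff.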
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.md b/vendor/github.com/spf13/cobra/powershell_completions.md
index 55f154a68..c449f1e5c 100644
--- a/vendor/github.com/spf13/cobra/powershell_completions.md
+++ b/vendor/github.com/spf13/cobra/powershell_completions.md
@@ -1,16 +1,3 @@
# Generating PowerShell Completions For Your Own cobra.Command
-Cobra can generate PowerShell completion scripts. Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` environment variable. See `Get-Help about_Profiles` for more info about PowerShell profiles.
-
-*Note*: PowerShell completions have not (yet?) been aligned to Cobra's generic shell completion support. This implies the PowerShell completions are not as rich as for other shells (see [What's not yet supported](#whats-not-yet-supported)), and may behave slightly differently. They are still very useful for PowerShell users.
-
-# What's supported
-
-- Completion for subcommands using their `.Short` description
-- Completion for non-hidden flags using their `.Name` and `.Shorthand`
-
-# What's not yet supported
-
-- Command aliases
-- Required, filename or custom flags (they will work like normal flags)
-- Custom completion scripts
+Please refer to [Shell Completions](shell_completions.md#powershell-completions) for details.
diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md
index 31c272036..d98a71e36 100644
--- a/vendor/github.com/spf13/cobra/projects_using_cobra.md
+++ b/vendor/github.com/spf13/cobra/projects_using_cobra.md
@@ -25,6 +25,8 @@
- [Moby (former Docker)](https://github.com/moby/moby)
- [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
- [OpenShift](https://www.openshift.com/)
+- [Ory Hydra](https://github.com/ory/hydra)
+- [Ory Kratos](https://github.com/ory/kratos)
- [Pouch](https://github.com/alibaba/pouch)
- [ProjectAtomic (enterprise)](http://www.projectatomic.io/)
- [Prototool](https://github.com/uber/prototool)
@@ -32,4 +34,5 @@
- [Rclone](https://rclone.org/)
- [Skaffold](https://skaffold.dev/)
- [Tendermint](https://github.com/tendermint/tendermint)
+- [Twitch CLI](https://github.com/twitchdev/twitch-cli)
- [Werf](https://werf.io/)
diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md
index d8416ab1d..cd533ac3d 100644
--- a/vendor/github.com/spf13/cobra/shell_completions.md
+++ b/vendor/github.com/spf13/cobra/shell_completions.md
@@ -4,10 +4,10 @@ Cobra can generate shell completions for multiple shells.
The currently supported shells are:
- Bash
- Zsh
-- Fish
+- fish
- PowerShell
-If you are using the generator you can create a completion command by running
+If you are using the generator, you can create a completion command by running
```bash
cobra add completion
@@ -17,38 +17,46 @@ and then modifying the generated `cmd/completion.go` file to look something like
```go
var completionCmd = &cobra.Command{
- Use: "completion [bash|zsh|fish|powershell]",
- Short: "Generate completion script",
+ Use: "completion [bash|zsh|fish|powershell]",
+ Short: "Generate completion script",
Long: `To load completions:
Bash:
-$ source <(yourprogram completion bash)
+ $ source <(yourprogram completion bash)
-# To load completions for each session, execute once:
-Linux:
+ # To load completions for each session, execute once:
+ # Linux:
$ yourprogram completion bash > /etc/bash_completion.d/yourprogram
-MacOS:
+ # macOS:
$ yourprogram completion bash > /usr/local/etc/bash_completion.d/yourprogram
Zsh:
-# If shell completion is not already enabled in your environment you will need
-# to enable it. You can execute the following once:
+ # If shell completion is not already enabled in your environment,
+ # you will need to enable it. You can execute the following once:
-$ echo "autoload -U compinit; compinit" >> ~/.zshrc
+ $ echo "autoload -U compinit; compinit" >> ~/.zshrc
-# To load completions for each session, execute once:
-$ yourprogram completion zsh > "${fpath[1]}/_yourprogram"
+ # To load completions for each session, execute once:
+ $ yourprogram completion zsh > "${fpath[1]}/_yourprogram"
-# You will need to start a new shell for this setup to take effect.
+ # You will need to start a new shell for this setup to take effect.
-Fish:
+fish:
-$ yourprogram completion fish | source
+ $ yourprogram completion fish | source
-# To load completions for each session, execute once:
-$ yourprogram completion fish > ~/.config/fish/completions/yourprogram.fish
+ # To load completions for each session, execute once:
+ $ yourprogram completion fish > ~/.config/fish/completions/yourprogram.fish
+
+PowerShell:
+
+ PS> yourprogram completion powershell | Out-String | Invoke-Expression
+
+ # To load completions for every new session, run:
+ PS> yourprogram completion powershell > yourprogram.ps1
+ # and source this file from your PowerShell profile.
`,
DisableFlagsInUseLine: true,
ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
@@ -68,7 +76,7 @@ $ yourprogram completion fish > ~/.config/fish/completions/yourprogram.fish
}
```
-**Note:** The cobra generator may include messages printed to stdout for example if the config file is loaded, this will break the auto complete script so must be removed.
+**Note:** The cobra generator may include messages printed to stdout, for example, if the config file is loaded; this will break the auto-completion script, so it must be removed.
# Customizing completions
@@ -91,8 +99,7 @@ cmd := &cobra.Command{
Long: get_long,
Example: get_example,
Run: func(cmd *cobra.Command, args []string) {
- err := RunGet(f, out, cmd, args)
- util.CheckErr(err)
+ cobra.CheckErr(RunGet(f, out, cmd, args))
},
ValidArgs: validArgs,
}
@@ -124,7 +131,7 @@ the completion algorithm if entered manually, e.g. in:
```bash
$ kubectl get rc [tab][tab]
-backend frontend database
+backend frontend database
```
Note that without declaring `rc` as an alias, the completion algorithm would not know to show the list of
@@ -246,7 +253,7 @@ and you'll get something like
```bash
$ kubectl exec [tab][tab]
--c --container= -p --pod=
+-c --container= -p --pod=
```
### Specify dynamic flag completion
@@ -316,7 +323,7 @@ cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string,
```
### Descriptions for completions
-Both `zsh` and `fish` allow for descriptions to annotate completion choices. For commands and flags, Cobra will provide the descriptions automatically, based on usage information. For example, using zsh:
+`zsh`, `fish` and `powershell` allow for descriptions to annotate completion choices. For commands and flags, Cobra will provide the descriptions automatically, based on usage information. For example, using zsh:
```
$ helm s[tab]
search -- search for a keyword in charts
@@ -361,12 +368,12 @@ completion firstcommand secondcommand
```
### Bash legacy dynamic completions
-For backwards-compatibility, Cobra still supports its bash legacy dynamic completion solution.
+For backward compatibility, Cobra still supports its bash legacy dynamic completion solution.
Please refer to [Bash Completions](bash_completions.md) for details.
## Zsh completions
-Cobra supports native Zsh completion generated from the root `cobra.Command`.
+Cobra supports native zsh completion generated from the root `cobra.Command`.
The generated completion script should be put somewhere in your `$fpath` and be named
`_<yourProgram>`. You will need to start a new shell for the completions to become available.
@@ -385,23 +392,23 @@ status -- displays the status of the named release
$ helm s[tab]
search show status
```
-*Note*: Because of backwards-compatibility requirements, we were forced to have a different API to disable completion descriptions between `Zsh` and `Fish`.
+*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`.
### Limitations
* Custom completions implemented in Bash scripting (legacy) are not supported and will be ignored for `zsh` (including the use of the `BashCompCustom` flag annotation).
- * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`).
+ * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
* The function `MarkFlagCustom()` is not supported and will be ignored for `zsh`.
* You should instead use `RegisterFlagCompletionFunc()`.
### Zsh completions standardization
-Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backwards-compatible, some small changes in behavior were introduced.
+Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backward-compatible, some small changes in behavior were introduced.
Please refer to [Zsh Completions](zsh_completions.md) for details.
-## Fish completions
+## fish completions
-Cobra supports native Fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users.
+Cobra supports native fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users.
```
# With descriptions
$ helm s[tab]
@@ -411,12 +418,12 @@ search (search for a keyword in charts) show (show information of a chart) s
$ helm s[tab]
search show status
```
-*Note*: Because of backwards-compatibility requirements, we were forced to have a different API to disable completion descriptions between `Zsh` and `Fish`.
+*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`.
### Limitations
-* Custom completions implemented in Bash scripting (legacy) are not supported and will be ignored for `fish` (including the use of the `BashCompCustom` flag annotation).
- * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`).
+* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `fish` (including the use of the `BashCompCustom` flag annotation).
+ * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
* The function `MarkFlagCustom()` is not supported and will be ignored for `fish`.
* You should instead use `RegisterFlagCompletionFunc()`.
* The following flag completion annotations are not supported and will be ignored for `fish`:
@@ -431,4 +438,46 @@ search show status
## PowerShell completions
-Please refer to [PowerShell Completions](powershell_completions.md) for details.
+Cobra supports native PowerShell completions generated from the root `cobra.Command`. You can use the `command.GenPowerShellCompletion()` or `command.GenPowerShellCompletionFile()` functions. To include descriptions, use `command.GenPowerShellCompletionWithDesc()` and `command.GenPowerShellCompletionFileWithDesc()`. Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users.
+
+The script is designed to support all three PowerShell completion modes:
+
+* TabCompleteNext (default Windows style - on each key press the next option is displayed)
+* Complete (works like bash)
+* MenuComplete (works like zsh)
+
+You set the mode with `Set-PSReadLineKeyHandler -Key Tab -Function <mode>`. Descriptions are only displayed when using the `Complete` or `MenuComplete` mode.
+
+Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` environment variable. See `Get-Help about_Profiles` for more info about PowerShell profiles.
+
+```
+# With descriptions and Mode 'Complete'
+$ helm s[tab]
+search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release)
+
+# With descriptions and Mode 'MenuComplete'. The description of the currently selected value will be displayed below the suggestions.
+$ helm s[tab]
+search show status
+
+search for a keyword in charts
+
+# Without descriptions
+$ helm s[tab]
+search show status
+```
+
+### Limitations
+
+* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `powershell` (including the use of the `BashCompCustom` flag annotation).
+ * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
+* The function `MarkFlagCustom()` is not supported and will be ignored for `powershell`.
+ * You should instead use `RegisterFlagCompletionFunc()`.
+* The following flag completion annotations are not supported and will be ignored for `powershell`:
+ * `BashCompFilenameExt` (filtering by file extension)
+ * `BashCompSubdirsInDir` (filtering by directory)
+* The functions corresponding to the above annotations are consequently not supported and will be ignored for `powershell`:
+ * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension)
+ * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory)
+* Similarly, the following completion directives are not supported and will be ignored for `powershell`:
+ * `ShellCompDirectiveFilterFileExt` (filtering by file extension)
+ * `ShellCompDirectiveFilterDirs` (filtering by directory)
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go
index 92a70394a..2e840285f 100644
--- a/vendor/github.com/spf13/cobra/zsh_completions.go
+++ b/vendor/github.com/spf13/cobra/zsh_completions.go
@@ -70,12 +70,12 @@ func (c *Command) genZshCompletion(w io.Writer, includeDesc bool) error {
return err
}
-func genZshComp(buf *bytes.Buffer, name string, includeDesc bool) {
+func genZshComp(buf io.StringWriter, name string, includeDesc bool) {
compCmd := ShellCompRequestCmd
if !includeDesc {
compCmd = ShellCompNoDescRequestCmd
}
- buf.WriteString(fmt.Sprintf(`#compdef _%[1]s %[1]s
+ WriteStringAndCheck(buf, fmt.Sprintf(`#compdef _%[1]s %[1]s
# zsh completion for %-36[1]s -*- shell-script -*-