Diffstat (limited to 'vendor')
43 files changed, 2523 insertions, 305 deletions
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go index d0b56c7f6..9657ecb69 100644 --- a/vendor/github.com/containers/common/pkg/config/config.go +++ b/vendor/github.com/containers/common/pkg/config/config.go @@ -269,6 +269,9 @@ type EngineConfig struct { // RemoteURI containers connection information used to connect to remote system. RemoteURI string `toml:"remote_uri,omitempty"` + // Identity key file for RemoteURI + RemoteIdentity string `toml:"remote_identity,omitempty"` + // RuntimePath is the path to OCI runtime binary for launching containers. // The first path pointing to a valid file will be used This is used only // when there are no OCIRuntime/OCIRuntimes defined. It is used only to be diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index 0044d6cb9..769e37e15 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.20.1 +1.20.2 diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go index f1c941f11..ff6e297f4 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go @@ -1544,8 +1544,8 @@ func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { } dev := stat.Rdev - majorNum := major(dev) - minorNum := minor(dev) + majorNum := major(uint64(dev)) + minorNum := minor(uint64(dev)) logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) return majorNum, minorNum, nil diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 2906e3e08..930a57a97 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -311,6 +311,9 @@ func parseOptions(options []string) (*overlayOptions, error) { return nil, fmt.Errorf("overlay: can't stat program %s: %v", val, err) } o.mountProgram = val + case "overlay2.skip_mount_home", "overlay.skip_mount_home", ".skip_mount_home": + logrus.Debugf("overlay: skip_mount_home=%s", val) + o.skipMountHome, err = strconv.ParseBool(val) case ".ignore_chown_errors", "overlay2.ignore_chown_errors", "overlay.ignore_chown_errors": logrus.Debugf("overlay: ignore_chown_errors=%s", val) o.ignoreChownErrors, err = strconv.ParseBool(val) diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod index a7d9ade60..01ac1827b 100644 --- a/vendor/github.com/containers/storage/go.mod +++ b/vendor/github.com/containers/storage/go.mod @@ -6,23 +6,23 @@ require ( github.com/Microsoft/hcsshim v0.8.9 github.com/docker/go-units v0.4.0 github.com/hashicorp/go-multierror v1.0.0 - github.com/klauspost/compress v1.10.5 + github.com/klauspost/compress v1.10.7 github.com/klauspost/pgzip v1.2.4 github.com/mattn/go-shellwords v1.0.10 github.com/mistifyio/go-zfs v2.1.1+incompatible github.com/opencontainers/go-digest v1.0.0 - github.com/opencontainers/runc v1.0.0-rc9 + github.com/opencontainers/runc v1.0.0-rc90 github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700 - github.com/opencontainers/selinux v1.5.1 + github.com/opencontainers/selinux v1.5.2 github.com/pkg/errors v0.9.1 github.com/pquerna/ffjson 
v0.0.0-20181028064349-e517b90714f7 github.com/sirupsen/logrus v1.6.0 - github.com/stretchr/testify v1.5.1 + github.com/stretchr/testify v1.6.0 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 github.com/tchap/go-patricia v2.3.0+incompatible github.com/vbatts/tar-split v0.11.1 golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 - golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2 + golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 gotest.tools v2.2.0+incompatible ) diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum index eab0fd61e..2b5a279c6 100644 --- a/vendor/github.com/containers/storage/go.sum +++ b/vendor/github.com/containers/storage/go.sum @@ -8,6 +8,7 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1 h1:uict5mhHFTzKLUCufdSLym7z/J0CbBJT59lYbP9wtbg= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -15,12 +16,14 @@ github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= @@ -29,6 +32,7 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= 
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= @@ -42,8 +46,8 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.10.5 h1:7q6vHIqubShURwQz8cQK6yIe/xC3IF0Vm7TGfqjewrc= -github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.10.7 h1:7rix8v8GpI3ZBb0nSozFRgbtXKv+hOe+qfEpZqybrAg= +github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A= github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -62,12 +66,12 @@ github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc= -github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc90 h1:4+xo8mtWixbHoEm451+WJNUrq12o2/tDsyK9Vgc/NcA= +github.com/opencontainers/runc v1.0.0-rc90/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700 h1:eNUVfm/RFLIi1G7flU5/ZRTHvd4kcVuzfRnL6OFlzCI= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.5.1 h1:jskKwSMFYqyTrHEuJgQoUlTcId0av64S6EWObrIfn5Y= -github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= +github.com/opencontainers/selinux v1.5.2 h1:F6DgIsjgBIcDksLW4D5RG9bXok6oqZ3nvMwj4ZoFu/Q= +github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -81,15 +85,17 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.0 h1:jlIyCplCJFULU/01vCkhKuTyc3OorI3bJFuw6obfgho= +github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs= github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5 h1:MCfT24H3f//U5+UCrZp1/riVO3B50BovxtDiNn0XKkk= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE= github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= @@ -120,8 +126,8 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2 h1:/J2nHFg1MTqaRLFO7M+J78ASNsJoz3r0cvHBPQ77fsE= -golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 h1:1/DFK4b7JH8DmkqhUk48onnSfrPzImPoVxuomtbT2nk= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -145,6 +151,8 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go index 1ac6c04e3..4a35997ea 100644 --- a/vendor/github.com/containers/storage/pkg/config/config.go +++ b/vendor/github.com/containers/storage/pkg/config/config.go @@ -2,8 +2,6 @@ package config import ( "fmt" - - 
"github.com/sirupsen/logrus" ) // ThinpoolOptionsConfig represents the "storage.options.thinpool" @@ -269,11 +267,11 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string { } else if options.Size != "" { doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) } - - if options.Overlay.SkipMountHome != "" || options.SkipMountHome != "" { - logrus.Warn("skip_mount_home option is no longer supported, ignoring option") + if options.Overlay.SkipMountHome != "" { + doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.Overlay.SkipMountHome)) + } else if options.SkipMountHome != "" { + doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.SkipMountHome)) } - case "vfs": if options.Vfs.IgnoreChownErrors != "" { doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Vfs.IgnoreChownErrors)) diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index c7f9b2cf8..19909e9c6 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -76,6 +76,9 @@ additionalimagestores = [ # mountopt specifies comma separated list of extra mount options mountopt = "nodev" +# Set to skip a PRIVATE bind mount on the storage home directory. +# skip_mount_home = "false" + # Size is used to set a maximum size of the container image. # size = "" diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index eaf622f43..263f4edb2 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -3481,6 +3481,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { if config.Storage.Options.MountProgram != "" { storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mount_program=%s", config.Storage.Driver, config.Storage.Options.MountProgram)) } + if config.Storage.Options.SkipMountHome != "" { + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.skip_mount_home=%s", config.Storage.Driver, config.Storage.Options.SkipMountHome)) + } if config.Storage.Options.IgnoreChownErrors != "" { storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.ignore_chown_errors=%s", config.Storage.Driver, config.Storage.Options.IgnoreChownErrors)) } diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md index 50d56ffbf..52b111d5f 100644 --- a/vendor/github.com/json-iterator/go/README.md +++ b/vendor/github.com/json-iterator/go/README.md @@ -1,5 +1,5 @@ [![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge) -[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go) +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/json-iterator/go) [![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go) [![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go) 
[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go) @@ -18,16 +18,16 @@ Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/githu Raw Result (easyjson requires static code generation) -| | ns/op | allocation bytes | allocation times | -| --- | --- | --- | --- | -| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | -| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | -| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | -| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | -| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | -| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | +| | ns/op | allocation bytes | allocation times | +| --------------- | ----------- | ---------------- | ---------------- | +| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | +| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | +| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | +| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | +| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | +| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | -Always benchmark with your own workload. +Always benchmark with your own workload. The result depends heavily on the data input. # Usage @@ -41,10 +41,10 @@ import "encoding/json" json.Marshal(&data) ``` -with +with ```go -import "github.com/json-iterator/go" +import jsoniter "github.com/json-iterator/go" var json = jsoniter.ConfigCompatibleWithStandardLibrary json.Marshal(&data) @@ -60,7 +60,7 @@ json.Unmarshal(input, &data) with ```go -import "github.com/json-iterator/go" +import jsoniter "github.com/json-iterator/go" var json = jsoniter.ConfigCompatibleWithStandardLibrary json.Unmarshal(input, &data) @@ -78,10 +78,10 @@ go get github.com/json-iterator/go Contributors -* [thockin](https://github.com/thockin) -* [mattn](https://github.com/mattn) -* [cch123](https://github.com/cch123) -* [Oleg Shaldybin](https://github.com/olegshaldybin) -* [Jason Toffaletti](https://github.com/toffaletti) +- [thockin](https://github.com/thockin) +- [mattn](https://github.com/mattn) +- [cch123](https://github.com/cch123) +- [Oleg Shaldybin](https://github.com/olegshaldybin) +- [Jason Toffaletti](https://github.com/toffaletti) Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go index a4b93c78c..1f12f6612 100644 --- a/vendor/github.com/json-iterator/go/any_str.go +++ b/vendor/github.com/json-iterator/go/any_str.go @@ -64,7 +64,6 @@ func (any *stringAny) ToInt64() int64 { flag := 1 startPos := 0 - endPos := 0 if any.val[0] == '+' || any.val[0] == '-' { startPos = 1 } @@ -73,6 +72,7 @@ func (any *stringAny) ToInt64() int64 { flag = -1 } + endPos := startPos for i := startPos; i < len(any.val); i++ { if any.val[i] >= '0' && any.val[i] <= '9' { endPos = i + 1 @@ -98,7 +98,6 @@ func (any *stringAny) ToUint64() uint64 { } startPos := 0 - endPos := 0 if any.val[0] == '-' { return 0 @@ -107,6 +106,7 @@ func (any *stringAny) ToUint64() uint64 { startPos = 1 } + endPos := startPos for i := startPos; i < len(any.val); i++ { if any.val[i] >= '0' && any.val[i] <= '9' { endPos = i + 1 diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go index 8c58fcba5..2adcdc3b7 100644 --- 
a/vendor/github.com/json-iterator/go/config.go +++ b/vendor/github.com/json-iterator/go/config.go @@ -183,11 +183,11 @@ func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { rawMessage := *(*json.RawMessage)(ptr) iter := cfg.BorrowIterator([]byte(rawMessage)) + defer cfg.ReturnIterator(iter) iter.Read() - if iter.Error != nil { + if iter.Error != nil && iter.Error != io.EOF { stream.WriteRaw("null") } else { - cfg.ReturnIterator(iter) stream.WriteRaw(string(rawMessage)) } }, func(ptr unsafe.Pointer) bool { diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go index b65137114..58ee89c84 100644 --- a/vendor/github.com/json-iterator/go/iter_object.go +++ b/vendor/github.com/json-iterator/go/iter_object.go @@ -150,7 +150,7 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { if c == '}' { return iter.decrementDepth() } - iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c})) + iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c})) iter.decrementDepth() return false } @@ -206,7 +206,7 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { if c == '}' { return iter.decrementDepth() } - iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) + iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c})) iter.decrementDepth() return false } diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go index 80320cd64..74a97bfe5 100644 --- a/vendor/github.com/json-iterator/go/reflect_extension.go +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -475,7 +475,7 @@ func calcFieldNames(originalFieldName string, tagProvidedFieldName string, whole fieldNames = []string{tagProvidedFieldName} } // private? 
- isNotExported := unicode.IsLower(rune(originalFieldName[0])) + isNotExported := unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_' if isNotExported { fieldNames = []string{} } diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go index 9e2b623fe..582967130 100644 --- a/vendor/github.com/json-iterator/go/reflect_map.go +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -49,6 +49,33 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { return decoder } } + + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(unmarshalerType) { + return &unmarshalerDecoder{ + valType: typ, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(textUnmarshalerType) { + return &textUnmarshalerDecoder{ + valType: typ, + } + } + switch typ.Kind() { case reflect.String: return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) @@ -63,31 +90,6 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { typ = reflect2.DefaultTypeOfKind(typ.Kind()) return &numericMapKeyDecoder{decoderOfType(ctx, typ)} default: - ptrType := reflect2.PtrTo(typ) - if ptrType.Implements(unmarshalerType) { - return &referenceDecoder{ - &unmarshalerDecoder{ - valType: ptrType, - }, - } - } - if typ.Implements(unmarshalerType) { - return &unmarshalerDecoder{ - valType: typ, - } - } - if ptrType.Implements(textUnmarshalerType) { - return &referenceDecoder{ - &textUnmarshalerDecoder{ - valType: ptrType, - }, - } - } - if typ.Implements(textUnmarshalerType) { - return &textUnmarshalerDecoder{ - valType: typ, - } - } return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} } } @@ -103,6 +105,19 @@ func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { return encoder } } + + if typ == textMarshalerType { + return &directTextMarshalerEncoder{ + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Implements(textMarshalerType) { + return &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + switch typ.Kind() { case reflect.String: return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) @@ -117,17 +132,6 @@ func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { typ = reflect2.DefaultTypeOfKind(typ.Kind()) return &numericMapKeyEncoder{encoderOfType(ctx, typ)} default: - if typ == textMarshalerType { - return &directTextMarshalerEncoder{ - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - } - } - if typ.Implements(textMarshalerType) { - return &textMarshalerEncoder{ - valType: typ, - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - } - } if typ.Kind() == reflect.Interface { return &dynamicMapKeyEncoder{ctx, typ} } @@ -163,10 +167,6 @@ func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { if c == '}' { return } - if c != '"' { - iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) - return - } iter.unreadByte() key := decoder.keyType.UnsafeNew() decoder.keyDecoder.Decode(key, iter) diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go index 43ec71d6d..fa71f4748 100644 --- a/vendor/github.com/json-iterator/go/reflect_optional.go +++ 
b/vendor/github.com/json-iterator/go/reflect_optional.go @@ -2,7 +2,6 @@ package jsoniter import ( "github.com/modern-go/reflect2" - "reflect" "unsafe" ) @@ -10,9 +9,6 @@ func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { ptrType := typ.(*reflect2.UnsafePtrType) elemType := ptrType.Elem() decoder := decoderOfType(ctx, elemType) - if ctx.prefix == "" && elemType.Kind() == reflect.Ptr { - return &dereferenceDecoder{elemType, decoder} - } return &OptionalDecoder{elemType, decoder} } diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go index 5ad5cc561..d7eb0eb5c 100644 --- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -507,7 +507,7 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) for c = ','; c == ','; c = iter.nextToken() { decoder.decodeOneField(ptr, iter) } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } if c != '}' { @@ -588,7 +588,7 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -622,7 +622,7 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -660,7 +660,7 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -702,7 +702,7 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -748,7 +748,7 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -798,7 +798,7 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -852,7 +852,7 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) 
} iter.decrementDepth() @@ -910,7 +910,7 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -972,7 +972,7 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -1038,7 +1038,7 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator break } } - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go index 17662fded..23d8a3ad6 100644 --- a/vendor/github.com/json-iterator/go/stream.go +++ b/vendor/github.com/json-iterator/go/stream.go @@ -103,14 +103,14 @@ func (stream *Stream) Flush() error { if stream.Error != nil { return stream.Error } - n, err := stream.out.Write(stream.buf) + _, err := stream.out.Write(stream.buf) if err != nil { if stream.Error == nil { stream.Error = err } return err } - stream.buf = stream.buf[n:] + stream.buf = stream.buf[:0] return nil } @@ -177,7 +177,6 @@ func (stream *Stream) WriteEmptyObject() { func (stream *Stream) WriteMore() { stream.writeByte(',') stream.writeIndention(0) - stream.Flush() } // WriteArrayStart write [ with possible indention diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 97ae66a4a..fb42a398b 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -155,20 +155,70 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { // The length of the supplied input must match the end of a block exactly. // Before this is called, the table must be initialized with ReadTable unless // the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { - if len(s.dt.single) == 0 { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. 
+func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + } +} + +// Decoder provides stateless decoding. +type Decoder struct { + dt dTable + actualTableLog uint8 +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { return nil, errors.New("no table loaded") } var br bitReader - err = br.init(in) + err := br.init(src) if err != nil { - return nil, err + return dst, err } - s.Out = s.Out[:0] + maxDecodedSize := cap(dst) + dst = dst[:0] decode := func() byte { - val := br.peekBitsFast(s.actualTableLog) /* note : actualTableLog >= 1 */ - v := s.dt.single[val] + val := br.peekBitsFast(d.actualTableLog) /* note : actualTableLog >= 1 */ + v := d.dt.single[val] br.bitsRead += uint8(v.entry) return uint8(v.entry >> 8) } @@ -180,88 +230,80 @@ func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { // Avoid bounds check by always having full sized table. const tlSize = 1 << tableLogMax const tlMask = tlSize - 1 - dt := s.dt.single[:tlSize] + dt := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var tmp = s.huffWeight[:256] + var buf [256]byte var off uint8 for br.off >= 8 { br.fillFast() - tmp[off+0] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask]) - tmp[off+1] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask]) + buf[off+0] = hasDec(dt[br.peekBitsFast(d.actualTableLog)&tlMask]) + buf[off+1] = hasDec(dt[br.peekBitsFast(d.actualTableLog)&tlMask]) br.fillFast() - tmp[off+2] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask]) - tmp[off+3] = hasDec(dt[br.peekBitsFast(s.actualTableLog)&tlMask]) + buf[off+2] = hasDec(dt[br.peekBitsFast(d.actualTableLog)&tlMask]) + buf[off+3] = hasDec(dt[br.peekBitsFast(d.actualTableLog)&tlMask]) off += 4 if off == 0 { - if len(s.Out)+256 > s.MaxDecodedSize { + if len(dst)+256 > maxDecodedSize { br.close() return nil, ErrMaxDecodedSizeExceeded } - s.Out = append(s.Out, tmp...) + dst = append(dst, buf[:]...) } } - if len(s.Out)+int(off) > s.MaxDecodedSize { + if len(dst)+int(off) > maxDecodedSize { br.close() return nil, ErrMaxDecodedSizeExceeded } - s.Out = append(s.Out, tmp[:off]...) + dst = append(dst, buf[:off]...) for !br.finished() { br.fill() - if len(s.Out) >= s.MaxDecodedSize { + if len(dst) >= maxDecodedSize { br.close() return nil, ErrMaxDecodedSizeExceeded } - s.Out = append(s.Out, decode()) + dst = append(dst, decode()) } - return s.Out, br.close() + return dst, br.close() } // Decompress4X will decompress a 4X encoded stream. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. 
// The length of the supplied input must match the end of a block exactly. -// The destination size of the uncompressed data must be known and provided. -func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (s *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { if len(s.dt.single) == 0 { return nil, errors.New("no table loaded") } - if len(in) < 6+(4*1) { + if len(src) < 6+(4*1) { return nil, errors.New("input too small") } - if dstSize > s.MaxDecodedSize { - return nil, ErrMaxDecodedSizeExceeded - } - // TODO: We do not detect when we overrun a buffer, except if the last one does. var br [4]bitReader start := 6 for i := 0; i < 3; i++ { - length := int(in[i*2]) | (int(in[i*2+1]) << 8) - if start+length >= len(in) { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { return nil, errors.New("truncated input (or invalid offset)") } - err = br[i].init(in[start : start+length]) + err := br[i].init(src[start : start+length]) if err != nil { return nil, err } start += length } - err = br[3].init(in[start:]) + err := br[3].init(src[start:]) if err != nil { return nil, err } - // Prepare output - if cap(s.Out) < dstSize { - s.Out = make([]byte, 0, dstSize) - } - s.Out = s.Out[:dstSize] // destination, offset to match first output - dstOut := s.Out + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst dstEvery := (dstSize + 3) / 4 const tlSize = 1 << tableLogMax @@ -276,7 +318,7 @@ func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { } // Use temp table to avoid bound checks/append penalty. - var tmp = s.huffWeight[:256] + var buf [256]byte var off uint8 var decoded int @@ -300,8 +342,8 @@ bigloop: val2 := br[stream].peekBitsFast(s.actualTableLog) v2 := single[val2&tlMask] - tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8) - tmp[off+bufoff*stream] = uint8(v.entry >> 8) + buf[off+bufoff*stream+1] = uint8(v2.entry >> 8) + buf[off+bufoff*stream] = uint8(v.entry >> 8) br[stream].bitsRead += uint8(v2.entry) } @@ -313,8 +355,8 @@ bigloop: val2 := br[stream].peekBitsFast(s.actualTableLog) v2 := single[val2&tlMask] - tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8) - tmp[off+bufoff*stream] = uint8(v.entry >> 8) + buf[off+bufoff*stream+1] = uint8(v2.entry >> 8) + buf[off+bufoff*stream] = uint8(v.entry >> 8) br[stream].bitsRead += uint8(v2.entry) } @@ -326,8 +368,8 @@ bigloop: val2 := br[stream].peekBitsFast(s.actualTableLog) v2 := single[val2&tlMask] - tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8) - tmp[off+bufoff*stream] = uint8(v.entry >> 8) + buf[off+bufoff*stream+1] = uint8(v2.entry >> 8) + buf[off+bufoff*stream] = uint8(v.entry >> 8) br[stream].bitsRead += uint8(v2.entry) } @@ -339,8 +381,8 @@ bigloop: val2 := br[stream].peekBitsFast(s.actualTableLog) v2 := single[val2&tlMask] - tmp[off+bufoff*stream+1] = uint8(v2.entry >> 8) - tmp[off+bufoff*stream] = uint8(v.entry >> 8) + buf[off+bufoff*stream+1] = uint8(v2.entry >> 8) + buf[off+bufoff*stream] = uint8(v.entry >> 8) br[stream].bitsRead += uint8(v2.entry) } @@ -350,30 +392,30 @@ bigloop: if bufoff > dstEvery { return nil, errors.New("corruption detected: stream overrun 1") } - copy(dstOut, tmp[:bufoff]) - copy(dstOut[dstEvery:], tmp[bufoff:bufoff*2]) - copy(dstOut[dstEvery*2:], tmp[bufoff*2:bufoff*3]) - copy(dstOut[dstEvery*3:], tmp[bufoff*3:bufoff*4]) + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:], 
buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) off = 0 - dstOut = dstOut[bufoff:] + out = out[bufoff:] decoded += 256 // There must at least be 3 buffers left. - if len(dstOut) < dstEvery*3 { + if len(out) < dstEvery*3 { return nil, errors.New("corruption detected: stream overrun 2") } } } if off > 0 { ioff := int(off) - if len(dstOut) < dstEvery*3+ioff { + if len(out) < dstEvery*3+ioff { return nil, errors.New("corruption detected: stream overrun 3") } - copy(dstOut, tmp[:off]) - copy(dstOut[dstEvery:dstEvery+ioff], tmp[bufoff:bufoff*2]) - copy(dstOut[dstEvery*2:dstEvery*2+ioff], tmp[bufoff*2:bufoff*3]) - copy(dstOut[dstEvery*3:dstEvery*3+ioff], tmp[bufoff*3:bufoff*4]) + copy(out, buf[:off]) + copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) decoded += int(off) * 4 - dstOut = dstOut[off:] + out = out[off:] } // Decode remaining. @@ -382,10 +424,10 @@ bigloop: br := &br[i] for !br.finished() { br.fill() - if offset >= len(dstOut) { + if offset >= len(out) { return nil, errors.New("corruption detected: stream overrun 4") } - dstOut[offset] = decode(br) + out[offset] = decode(br) offset++ } decoded += offset - dstEvery*i @@ -397,7 +439,7 @@ bigloop: if dstSize != decoded { return nil, errors.New("corruption detected: short output block") } - return s.Out, nil + return dst, nil } // matches will compare a decoding table to a coding table. diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index bc977a302..f2a80b5d0 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -309,6 +309,20 @@ The decoder can be used for *concurrent* decompression of multiple buffers. It will only allow a certain number of concurrent operations to run. To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. +### Dictionaries + +Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed. + +Dictionaries are added individually to Decoders. +Dictionaries are generated by the `zstd --train` command and contains an initial state for the decoder. +To add a dictionary use the `RegisterDict(data)` with the dictionary data before starting any decompression. + +The dictionary will be used automatically for the data that specifies them. + +A re-used Decoder will still contain the dictionaries registered. + +When registering a dictionary with the same ID it will override the existing. + ### Allocation-less operation The decoder has been designed to operate without allocations after a warmup. diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 19181caea..4a14242c7 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -461,26 +461,22 @@ func (b *blockDec) decodeCompressed(hist *history) error { if huff == nil { huff = &huff0.Scratch{} } - huff.Out = b.literalBuf[:0] huff, literals, err = huff0.ReadTable(literals, huff) if err != nil { println("reading huffman table:", err) return err } // Use our out buffer. 
- huff.Out = b.literalBuf[:0] - huff.MaxDecodedSize = litRegenSize if fourStreams { - literals, err = huff.Decompress4X(literals, litRegenSize) + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) } else { - literals, err = huff.Decompress1X(literals) + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) } if err != nil { println("decoding compressed literals:", err) return err } // Make sure we don't leak our literals buffer - huff.Out = nil if len(literals) != litRegenSize { return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) } @@ -631,15 +627,12 @@ func (b *blockDec) decodeCompressed(hist *history) error { var err error // Use our out buffer. huff = hist.huffTree - huff.Out = b.literalBuf[:0] - huff.MaxDecodedSize = litRegenSize if fourStreams { - literals, err = huff.Decompress4X(literals, litRegenSize) + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) } else { - literals, err = huff.Decompress1X(literals) + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) } // Make sure we don't leak our literals buffer - huff.Out = nil if err != nil { println("decompressing literals:", err) return err @@ -649,12 +642,13 @@ func (b *blockDec) decodeCompressed(hist *history) error { } } else { if hist.huffTree != nil && huff != nil { - huffDecoderPool.Put(hist.huffTree) + if hist.dict == nil || hist.dict.litDec != hist.huffTree { + huffDecoderPool.Put(hist.huffTree) + } hist.huffTree = nil } } if huff != nil { - huff.Out = nil hist.huffTree = huff } if debug { @@ -687,14 +681,20 @@ func (b *blockDec) decodeCompressed(hist *history) error { // If only recent offsets were not transferred, this would be an obvious win. // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded. - if err := seqs.initialize(br, hist, literals, b.dst); err != nil { - println("initializing sequences:", err) - return err - } hbytes := hist.b if len(hbytes) > hist.windowSize { hbytes = hbytes[len(hbytes)-hist.windowSize:] + // We do not need history any more. + if hist.dict != nil { + hist.dict.content = nil + } } + + if err := seqs.initialize(br, hist, literals, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + err = seqs.decode(nSeqs, br, hbytes) if err != nil { return err diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go index dc4378b64..f708df1c4 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytereader.go +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -4,6 +4,8 @@ package zstd +import "encoding/binary" + // byteReader provides a byte reader that reads // little endian values from a byte stream. // The input stream is manually advanced. @@ -55,12 +57,7 @@ func (b byteReader) Uint32() uint32 { } return v } - b2 := b.b[b.off : b.off+4 : b.off+4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) + return binary.LittleEndian.Uint32(b.b[b.off : b.off+4]) } // unread returns the unread portion of the input. 
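The huff0 changes above replace the stateful `Scratch.Decompress1X`/`Decompress4X` entry points with a stateless `Decoder`, which the zstd block decoder now calls as `huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)`. A minimal sketch of the new pattern, assuming a caller that already knows the decoded size (the `decodeOneX` helper and its argument names are hypothetical, not part of the library):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/huff0"
)

// decodeOneX is a sketch only: tableAndData is a huff0 block whose table
// section is followed by a 1X-encoded payload, and decodedSize is assumed
// to be known from the enclosing container (litRegenSize in the zstd
// block decoder above).
func decodeOneX(tableAndData []byte, decodedSize int) ([]byte, error) {
	// ReadTable parses the Huffman table and returns the remaining payload.
	s, payload, err := huff0.ReadTable(tableAndData, nil)
	if err != nil {
		return nil, err
	}
	// The stateless Decoder can be shared by concurrent decompressors.
	// The capacity of dst bounds the output; exceeding it yields
	// ErrMaxDecodedSizeExceeded.
	dst := make([]byte, 0, decodedSize)
	out, err := s.Decoder().Decompress1X(dst, payload)
	if err != nil {
		return nil, fmt.Errorf("decoding literals: %w", err)
	}
	return out, nil
}
```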
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 324347623..8e34479ff 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -32,8 +32,9 @@ type Decoder struct { // Current read position used for Reader functionality. current decoderState - // Custom dictionaries - dicts map[uint32]struct{} + // Custom dictionaries. + // Always uses copies. + dicts map[uint32]dict // streamWg is the waitgroup for all streams streamWg sync.WaitGroup @@ -295,10 +296,18 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { frame.bBuf = input for { + frame.history.reset() err := frame.reset(&frame.bBuf) if err == io.EOF { return dst, nil } + if frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + return nil, ErrUnknownDictionary + } + frame.history.setDict(&dict) + } if err != nil { return dst, err } @@ -393,6 +402,19 @@ func (d *Decoder) Close() { d.current.err = ErrDecoderClosed } +// RegisterDict will load a dictionary +func (d *Decoder) RegisterDict(b []byte) error { + dc, err := loadDict(b) + if err != nil { + return err + } + if d.dicts == nil { + d.dicts = make(map[uint32]dict, 1) + } + d.dicts[dc.id] = *dc + return nil +} + // IOReadCloser returns the decoder as an io.ReadCloser for convenience. // Any changes to the decoder will be reflected, so the returned ReadCloser // can be reused along with the decoder. @@ -466,6 +488,14 @@ func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { if debug && err != nil { println("Frame decoder returned", err) } + if err == nil && frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + err = ErrUnknownDictionary + } else { + frame.history.setDict(&dict) + } + } if err != nil { stream.output <- decodeOutput{ err: err, diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go new file mode 100644 index 000000000..8eb6f6ba3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -0,0 +1,104 @@ +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/huff0" +) + +type dict struct { + id uint32 + + litDec *huff0.Scratch + llDec, ofDec, mlDec sequenceDec + offsets [3]int + content []byte +} + +var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec} + +// Load a dictionary as described in +// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format +func loadDict(b []byte) (*dict, error) { + // Check static field size. 
+ if len(b) <= 8+(3*4) { + return nil, io.ErrUnexpectedEOF + } + d := dict{ + llDec: sequenceDec{fse: &fseDecoder{}}, + ofDec: sequenceDec{fse: &fseDecoder{}}, + mlDec: sequenceDec{fse: &fseDecoder{}}, + } + if !bytes.Equal(b[:4], dictMagic[:]) { + return nil, ErrMagicMismatch + } + d.id = binary.LittleEndian.Uint32(b[4:8]) + if d.id == 0 { + return nil, errors.New("dictionaries cannot have ID 0") + } + + // Read literal table + var err error + d.litDec, b, err = huff0.ReadTable(b[8:], nil) + if err != nil { + return nil, err + } + + br := byteReader{ + b: b, + off: 0, + } + readDec := func(i tableIndex, dec *fseDecoder) error { + if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { + return err + } + if br.overread() { + return io.ErrUnexpectedEOF + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debug { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + // Set decoders as predefined so they aren't reused. + dec.preDefined = true + return nil + } + + if err := readDec(tableOffsets, d.ofDec.fse); err != nil { + return nil, err + } + if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { + return nil, err + } + if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { + return nil, err + } + if br.remain() < 12 { + return nil, io.ErrUnexpectedEOF + } + + d.offsets[0] = int(br.Uint32()) + br.advance(4) + d.offsets[1] = int(br.Uint32()) + br.advance(4) + d.offsets[2] = int(br.Uint32()) + br.advance(4) + if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { + return nil, errors.New("invalid offset in dictionary") + } + d.content = make([]byte, br.remain()) + copy(d.content, br.unread()) + if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { + return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) + } + + return &d, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index 5ebead9dc..50276bcde 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -671,4 +671,8 @@ encodeLoop: println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < bufferReset { + e.cur += int32(len(src)) + } } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index d1d3658e6..4104b456c 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -383,6 +383,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { panic("src too big") } } + // Protect against e.cur wraparound. if e.cur >= bufferReset { for i := range e.table[:] { @@ -516,6 +517,9 @@ encodeLoop: if debugAsserts && s-t > e.maxMatchOff { panic("s - t >e.maxMatchOff") } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } break } @@ -548,6 +552,9 @@ encodeLoop: panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } // Extend the 4-byte match as long as possible. 
//l := e.matchlenNoHist(s+4, t+4, src) + 4 // l := int32(matchLen(src[s+4:], src[t+4:])) + 4 @@ -647,6 +654,10 @@ encodeLoop: if debug { println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < bufferReset { + e.cur += int32(len(src)) + } } func (e *fastBase) addBlock(src []byte) int32 { @@ -714,7 +725,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 { } // Reset the encoding table. -func (e *fastBase) Reset() { +func (e *fastBase) Reset(singleBlock bool) { if e.blk == nil { e.blk = &blockEnc{} e.blk.init() @@ -727,7 +738,7 @@ func (e *fastBase) Reset() { } else { e.crc.Reset() } - if cap(e.hist) < int(e.maxMatchOff*2) { + if !singleBlock && cap(e.hist) < int(e.maxMatchOff*2) { l := e.maxMatchOff * 2 // Make it at least 1MB. if l < 1<<20 { diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index af4f00b73..bf42bb1cf 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -35,7 +35,7 @@ type encoder interface { AppendCRC([]byte) []byte WindowSize(size int) int32 UseBlock(*blockEnc) - Reset() + Reset(singleBlock bool) } type encoderState struct { @@ -82,7 +82,10 @@ func (e *Encoder) initialize() { } e.encoders = make(chan encoder, e.o.concurrent) for i := 0; i < e.o.concurrent; i++ { - e.encoders <- e.o.encoder() + enc := e.o.encoder() + // If not single block, history will be allocated on first use. + enc.Reset(true) + e.encoders <- enc } } @@ -112,7 +115,7 @@ func (e *Encoder) Reset(w io.Writer) { s.filling = s.filling[:0] s.current = s.current[:0] s.previous = s.previous[:0] - s.encoder.Reset() + s.encoder.Reset(false) s.headerWritten = false s.eofWritten = false s.fullFrameWritten = false @@ -445,11 +448,10 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { enc := <-e.encoders defer func() { // Release encoder reference to last block. - enc.Reset() + // If a non-single block is needed the encoder will reset again. + enc.Reset(true) e.encoders <- enc }() - enc.Reset() - blk := enc.Block() // Use single segments when above minimum window and below 1MB. single := len(src) < 1<<20 && len(src) > MinWindowSize if e.o.single != nil { @@ -472,12 +474,13 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { panic(err) } - if len(src) <= e.o.blockSize && len(src) <= maxBlockSize { + // If we can do everything in one block, prefer that. + if len(src) <= maxCompressedBlockSize { // Slightly faster with no history and everything in one block. 
if e.o.crc { _, _ = enc.CRC().Write(src) } - blk.reset(nil) + blk := enc.Block() blk.last = true enc.EncodeNoHist(blk, src) @@ -504,6 +507,8 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } blk.output = oldout } else { + enc.Reset(false) + blk := enc.Block() for len(src) > 0 { todo := src if len(todo) > e.o.blockSize { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 780880ebe..fc4a566d3 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -40,7 +40,7 @@ type frameDec struct { FrameContentSize uint64 frameDone sync.WaitGroup - DictionaryID uint32 + DictionaryID *uint32 HasCheckSum bool SingleSegment bool @@ -142,7 +142,7 @@ func (d *frameDec) reset(br byteBuffer) error { // Read Dictionary_ID // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = 0 + d.DictionaryID = nil if size := fhd & 3; size != 0 { if size == 3 { size = 4 @@ -154,19 +154,22 @@ func (d *frameDec) reset(br byteBuffer) error { } return io.ErrUnexpectedEOF } + var id uint32 switch size { case 1: - d.DictionaryID = uint32(b[0]) + id = uint32(b[0]) case 2: - d.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + id = uint32(b[0]) | (uint32(b[1]) << 8) case 4: - d.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) } if debug { - println("Dict size", size, "ID:", d.DictionaryID) + println("Dict size", size, "ID:", id) } - if d.DictionaryID != 0 { - return ErrUnknownDictionary + if id > 0 { + // ID 0 means "sorry, no dictionary anyway". + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format + d.DictionaryID = &id } } @@ -351,8 +354,6 @@ func (d *frameDec) initAsync() { // When the frame has finished decoding the *bufio.Reader // containing the remaining input will be sent on frameDec.frameDone. func (d *frameDec) startDecoder(output chan decodeOutput) { - // TODO: Init to dictionary - d.history.reset() written := int64(0) defer func() { @@ -445,8 +446,6 @@ func (d *frameDec) startDecoder(output chan decodeOutput) { // runDecoder will create a sync decoder that will decode a block of data. func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { - // TODO: Init to dictionary - d.history.reset() saved := d.history.b // We use the history for output to avoid copying it. 
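The decoder changes in this bump (the new `dict.go`, the `*uint32` `DictionaryID`, and `Decoder.RegisterDict`) add dictionary decompression as described in the README hunk above. A hedged usage sketch, assuming `dictData` was produced by `zstd --train` and `compressed` is a frame referencing that dictionary's ID (the helper name is illustrative only):

```go
package main

import (
	"github.com/klauspost/compress/zstd"
)

// decodeWithDict is a sketch of the dictionary API introduced here.
func decodeWithDict(dictData, compressed []byte) ([]byte, error) {
	// A nil Reader is fine when the Decoder is only used via DecodeAll.
	dec, err := zstd.NewReader(nil)
	if err != nil {
		return nil, err
	}
	defer dec.Close()
	// Register the dictionary before decoding; a frame whose Dictionary_ID
	// the decoder does not know fails with ErrUnknownDictionary.
	if err := dec.RegisterDict(dictData); err != nil {
		return nil, err
	}
	return dec.DecodeAll(compressed, nil)
}
```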
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go index e002be98b..957cfeb79 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -19,7 +19,7 @@ const ( * Increasing memory usage improves compression ratio * Reduced memory usage can improve speed, due to cache effect * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = 11 + maxMemoryUsage = tablelogAbsoluteMax + 2 maxTableLog = maxMemoryUsage - 2 maxTablesize = 1 << maxTableLog diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go index e8c419bd5..f418f50fc 100644 --- a/vendor/github.com/klauspost/compress/zstd/history.go +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -17,6 +17,7 @@ type history struct { windowSize int maxSize int error bool + dict *dict } // reset will reset the history to initial state of a frame. @@ -36,12 +37,27 @@ func (h *history) reset() { } h.decoders = sequenceDecs{} if h.huffTree != nil { - huffDecoderPool.Put(h.huffTree) + if h.dict == nil || h.dict.litDec != h.huffTree { + huffDecoderPool.Put(h.huffTree) + } } h.huffTree = nil + h.dict = nil //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) } +func (h *history) setDict(dict *dict) { + if dict == nil { + return + } + h.dict = dict + h.decoders.litLengths = dict.llDec + h.decoders.offsets = dict.ofDec + h.decoders.matchLengths = dict.mlDec + h.recentOffsets = dict.offsets + h.huffTree = dict.litDec +} + // append bytes to history. // This function will make sure there is space for it, // if the buffer has been allocated with enough extra space. diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index 39238e16a..7ff870400 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -62,6 +62,7 @@ type sequenceDecs struct { matchLengths sequenceDec prevOffset [3]int hist []byte + dict []byte literals []byte out []byte windowSize int @@ -85,6 +86,10 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits s.windowSize = hist.windowSize s.out = out + s.dict = nil + if hist.dict != nil { + s.dict = hist.dict.content + } return nil } @@ -100,23 +105,78 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { printf("reading sequence %d, exceeded available data\n", seqs-i) return io.ErrUnexpectedEOF } - var litLen, matchOff, matchLen int + var ll, mo, ml int if br.off > 4+((maxOffsetBits+16+16)>>3) { - litLen, matchOff, matchLen = s.nextFast(br, llState, mlState, ofState) + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. 
+ br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } br.fillFast() } else { - litLen, matchOff, matchLen = s.next(br, llState, mlState, ofState) + ll, mo, ml = s.next(br, llState, mlState, ofState) br.fill() } if debugSequences { - println("Seq", seqs-i-1, "Litlen:", litLen, "matchOff:", matchOff, "(abs) matchLen:", matchLen) + println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) } - if litLen > len(s.literals) { - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", litLen, len(s.literals)) + if ll > len(s.literals) { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) } - size := litLen + matchLen + len(s.out) + size := ll + ml + len(s.out) if size-startSize > maxBlockSize { return fmt.Errorf("output (%d) bigger than max block size", size) } @@ -127,52 +187,70 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { s.out = append(s.out, make([]byte, maxBlockSize)...) s.out = s.out[:len(s.out)-maxBlockSize] } - if matchLen > maxMatchLen { - return fmt.Errorf("match len (%d) bigger than max allowed length", matchLen) - } - if matchOff > len(s.out)+len(hist)+litLen { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", matchOff, len(s.out)+len(hist)+litLen) - } - if matchOff > s.windowSize { - return fmt.Errorf("match offset (%d) bigger than window size (%d)", matchOff, s.windowSize) - } - if matchOff == 0 && matchLen > 0 { - return fmt.Errorf("zero matchoff and matchlen > 0") + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) } - s.out = append(s.out, s.literals[:litLen]...) - s.literals = s.literals[litLen:] + // Add literals + s.out = append(s.out, s.literals[:ll]...) + s.literals = s.literals[ll:] out := s.out + if mo > len(s.out)+len(hist) || mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (mo - (len(s.out) + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + } + end := dictO + ml + if end > len(s.dict) { + out = append(out, s.dict[dictO:]...) + mo -= len(s.dict) - dictO + ml -= len(s.dict) - dictO + } else { + out = append(out, s.dict[dictO:end]...) 
+ mo = 0 + ml = 0 + } + } + + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + // Copy from history. // TODO: Blocks without history could be made to ignore this completely. - if v := matchOff - len(s.out); v > 0 { + if v := mo - len(s.out); v > 0 { // v is the start position in history from end. start := len(s.hist) - v - if matchLen > v { + if ml > v { // Some goes into current block. // Copy remainder of history out = append(out, s.hist[start:]...) - matchOff -= v - matchLen -= v + mo -= v + ml -= v } else { - out = append(out, s.hist[start:start+matchLen]...) - matchLen = 0 + out = append(out, s.hist[start:start+ml]...) + ml = 0 } } // We must be in current buffer now - if matchLen > 0 { - start := len(s.out) - matchOff - if matchLen <= len(s.out)-start { + if ml > 0 { + start := len(s.out) - mo + if ml <= len(s.out)-start { // No overlap - out = append(out, s.out[start:start+matchLen]...) + out = append(out, s.out[start:start+ml]...) } else { // Overlapping copy // Extend destination slice and copy one byte at the time. - out = out[:len(out)+matchLen] - src := out[start : start+matchLen] + out = out[:len(out)+ml] + src := out[start : start+ml] // Destination is the space we just added. - dst := out[len(out)-matchLen:] + dst := out[len(out)-ml:] dst = dst[:len(src)] for i := range src { dst[i] = src[i] diff --git a/vendor/github.com/seccomp/containers-golang/seccomp.json b/vendor/github.com/seccomp/containers-golang/seccomp.json index 4c84d981f..06b39024a 100644 --- a/vendor/github.com/seccomp/containers-golang/seccomp.json +++ b/vendor/github.com/seccomp/containers-golang/seccomp.json @@ -317,7 +317,6 @@ "signalfd", "signalfd4", "sigreturn", - "socket", "socketcall", "socketpair", "splice", @@ -769,6 +768,111 @@ ] }, "excludes": {} + }, + { + "names": [ + "socket" + ], + "action": "SCMP_ACT_ERRNO", + "args": [ + { + "index": 0, + "value": 16, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + }, + { + "index": 2, + "value": 9, + "valueTwo": 0, + "op": "SCMP_CMP_EQ" + } + ], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_AUDIT_WRITE" + ] + }, + "errnoRet": 22 + }, + { + "names": [ + "socket" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 2, + "value": 9, + "valueTwo": 0, + "op": "SCMP_CMP_NE" + } + ], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_AUDIT_WRITE" + ] + } + }, + { + "names": [ + "socket" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 16, + "valueTwo": 0, + "op": "SCMP_CMP_NE" + } + ], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_AUDIT_WRITE" + ] + } + }, + { + "names": [ + "socket" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 2, + "value": 9, + "valueTwo": 0, + "op": "SCMP_CMP_NE" + } + ], + "comment": "", + "includes": {}, + "excludes": { + "caps": [ + "CAP_AUDIT_WRITE" + ] + } + }, + { + "names": [ + "socket" + ], + "action": "SCMP_ACT_ALLOW", + "args": null, + "comment": "", + "includes": { + "caps": [ + "CAP_AUDIT_WRITE" + ] + }, + "excludes": {} } ] }
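The seccomp.json rules above stop allowing socket unconditionally: without CAP_AUDIT_WRITE, socket(AF_NETLINK, ..., NETLINK_AUDIT) is answered with errno 22 (EINVAL), while every other socket call still matches one of the allow rules. A hedged sketch of what a process confined by such a profile would observe; the constants come from golang.org/x/sys/unix and the program is illustrative, not part of the profile:

package main

import (
	"errors"
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Matches the errno rule: arg0 == AF_NETLINK (16) and arg2 == NETLINK_AUDIT (9).
	fd, err := unix.Socket(unix.AF_NETLINK, unix.SOCK_RAW, unix.NETLINK_AUDIT)
	if errors.Is(err, unix.EINVAL) {
		fmt.Println("audit netlink socket rejected with EINVAL, as the profile intends")
	} else if err == nil {
		// Either CAP_AUDIT_WRITE is present or no such profile is applied.
		unix.Close(fd)
		fmt.Println("audit netlink socket allowed")
	}

	// Any other socket call falls through to one of the SCMP_ACT_ALLOW rules.
	fd, err = unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
	if err == nil {
		unix.Close(fd)
	}
	fmt.Println("AF_INET socket:", err)
}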
\ No newline at end of file diff --git a/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go b/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go index e137a5887..2e3e337ac 100644 --- a/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go +++ b/vendor/github.com/seccomp/containers-golang/seccomp_default_linux.go @@ -7,6 +7,8 @@ package seccomp // import "github.com/seccomp/containers-golang" import ( + "syscall" + "golang.org/x/sys/unix" ) @@ -45,6 +47,8 @@ func arches() []Architecture { // DefaultProfile defines the whitelist for the default seccomp profile. func DefaultProfile() *Seccomp { + einval := uint(syscall.EINVAL) + syscalls := []*Syscall{ { Names: []string{ @@ -313,7 +317,6 @@ func DefaultProfile() *Seccomp { "signalfd", "signalfd4", "sigreturn", - "socket", "socketcall", "socketpair", "splice", @@ -652,6 +655,85 @@ func DefaultProfile() *Seccomp { Caps: []string{"CAP_SYS_TTY_CONFIG"}, }, }, + { + Names: []string{ + "socket", + }, + Action: ActErrno, + ErrnoRet: &einval, + Args: []*Arg{ + { + Index: 0, + Value: syscall.AF_NETLINK, + Op: OpEqualTo, + }, + { + Index: 2, + Value: syscall.NETLINK_AUDIT, + Op: OpEqualTo, + }, + }, + Excludes: Filter{ + Caps: []string{"CAP_AUDIT_WRITE"}, + }, + }, + { + Names: []string{ + "socket", + }, + Action: ActAllow, + Args: []*Arg{ + { + Index: 2, + Value: syscall.NETLINK_AUDIT, + Op: OpNotEqual, + }, + }, + Excludes: Filter{ + Caps: []string{"CAP_AUDIT_WRITE"}, + }, + }, + { + Names: []string{ + "socket", + }, + Action: ActAllow, + Args: []*Arg{ + { + Index: 0, + Value: syscall.AF_NETLINK, + Op: OpNotEqual, + }, + }, + Excludes: Filter{ + Caps: []string{"CAP_AUDIT_WRITE"}, + }, + }, + { + Names: []string{ + "socket", + }, + Action: ActAllow, + Args: []*Arg{ + { + Index: 2, + Value: syscall.NETLINK_AUDIT, + Op: OpNotEqual, + }, + }, + Excludes: Filter{ + Caps: []string{"CAP_AUDIT_WRITE"}, + }, + }, + { + Names: []string{ + "socket", + }, + Action: ActAllow, + Includes: Filter{ + Caps: []string{"CAP_AUDIT_WRITE"}, + }, + }, } return &Seccomp{ diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index b4c46042b..49370eb16 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -6,7 +6,6 @@ package assert import ( - io "io" http "net/http" url "net/url" time "time" @@ -202,11 +201,11 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg // assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). -func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) bool { +func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } - return HTTPBodyContains(t, handler, method, url, values, body, str, append([]interface{}{msg}, args...)...) + return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) 
} // HTTPBodyNotContainsf asserts that a specified handler returns a @@ -215,11 +214,11 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url // assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). -func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) bool { +func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } - return HTTPBodyNotContains(t, handler, method, url, values, body, str, append([]interface{}{msg}, args...)...) + return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) } // HTTPErrorf asserts that a specified handler returns an error status code. diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 9bea8d189..9db889427 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -6,7 +6,6 @@ package assert import ( - io "io" http "net/http" url "net/url" time "time" @@ -386,11 +385,11 @@ func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args . // a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) bool { +func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() } - return HTTPBodyContains(a.t, handler, method, url, values, body, str, msgAndArgs...) + return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...) } // HTTPBodyContainsf asserts that a specified handler returns a @@ -399,11 +398,11 @@ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, u // a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) bool { +func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() } - return HTTPBodyContainsf(a.t, handler, method, url, values, body, str, msg, args...) + return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...) } // HTTPBodyNotContains asserts that a specified handler returns a @@ -412,11 +411,11 @@ func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, // a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). 
-func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) bool { +func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() } - return HTTPBodyNotContains(a.t, handler, method, url, values, body, str, msgAndArgs...) + return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...) } // HTTPBodyNotContainsf asserts that a specified handler returns a @@ -425,11 +424,11 @@ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string // a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) bool { +func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() } - return HTTPBodyNotContainsf(a.t, handler, method, url, values, body, str, msg, args...) + return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...) } // HTTPError asserts that a specified handler returns an error status code. diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index 30ef7cc06..4ed341dd2 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -2,7 +2,6 @@ package assert import ( "fmt" - "io" "net/http" "net/http/httptest" "net/url" @@ -112,13 +111,9 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va // HTTPBody is a helper that returns HTTP body of the response. It returns // empty string if building a new request fails. -func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values, body io.Reader) string { +func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { w := httptest.NewRecorder() - - if values != nil { - url = url + "?" + values.Encode() - } - req, err := http.NewRequest(method, url, body) + req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) if err != nil { return "" } @@ -132,13 +127,13 @@ func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values, b // assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). 
-func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) bool { +func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } - httpBody := HTTPBody(handler, method, url, values, body) + body := HTTPBody(handler, method, url, values) - contains := strings.Contains(httpBody, fmt.Sprint(str)) + contains := strings.Contains(body, fmt.Sprint(str)) if !contains { Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) } @@ -152,13 +147,13 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, // assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). -func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) bool { +func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } - httpBody := HTTPBody(handler, method, url, values, body) + body := HTTPBody(handler, method, url, values) - contains := strings.Contains(httpBody, fmt.Sprint(str)) + contains := strings.Contains(body, fmt.Sprint(str)) if contains { Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) } diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 693648f8a..ec4624b28 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -7,7 +7,6 @@ package require import ( assert "github.com/stretchr/testify/assert" - io "io" http "net/http" url "net/url" time "time" @@ -489,11 +488,11 @@ func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...in // assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). -func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) { +func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } - if assert.HTTPBodyContains(t, handler, method, url, values, body, str, msgAndArgs...) { + if assert.HTTPBodyContains(t, handler, method, url, values, str, msgAndArgs...) { return } t.FailNow() @@ -505,11 +504,11 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url s // assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). 
-func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) { +func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } - if assert.HTTPBodyContainsf(t, handler, method, url, values, body, str, msg, args...) { + if assert.HTTPBodyContainsf(t, handler, method, url, values, str, msg, args...) { return } t.FailNow() @@ -521,11 +520,11 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url // assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). -func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) { +func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } - if assert.HTTPBodyNotContains(t, handler, method, url, values, body, str, msgAndArgs...) { + if assert.HTTPBodyNotContains(t, handler, method, url, values, str, msgAndArgs...) { return } t.FailNow() @@ -537,11 +536,11 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, ur // assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). -func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) { +func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } - if assert.HTTPBodyNotContainsf(t, handler, method, url, values, body, str, msg, args...) { + if assert.HTTPBodyNotContainsf(t, handler, method, url, values, str, msg, args...) { return } t.FailNow() diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index 84fc1c88d..103d7dcb6 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -7,7 +7,6 @@ package require import ( assert "github.com/stretchr/testify/assert" - io "io" http "net/http" url "net/url" time "time" @@ -387,11 +386,11 @@ func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args . // a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) { +func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() } - HTTPBodyContains(a.t, handler, method, url, values, body, str, msgAndArgs...) + HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...) 
} // HTTPBodyContainsf asserts that a specified handler returns a @@ -400,11 +399,11 @@ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, u // a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) { +func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() } - HTTPBodyContainsf(a.t, handler, method, url, values, body, str, msg, args...) + HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...) } // HTTPBodyNotContains asserts that a specified handler returns a @@ -413,11 +412,11 @@ func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, // a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msgAndArgs ...interface{}) { +func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() } - HTTPBodyNotContains(a.t, handler, method, url, values, body, str, msgAndArgs...) + HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...) } // HTTPBodyNotContainsf asserts that a specified handler returns a @@ -426,11 +425,11 @@ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string // a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, body io.Reader, str interface{}, msg string, args ...interface{}) { +func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() } - HTTPBodyNotContainsf(a.t, handler, method, url, values, body, str, msg, args...) + HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...) } // HTTPError asserts that a specified handler returns an error status code. diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go new file mode 100644 index 000000000..b909471cc --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/client.go @@ -0,0 +1,813 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package agent implements the ssh-agent protocol, and provides both +// a client and a server. The client can talk to a standard ssh-agent +// that uses UNIX sockets, and one could implement an alternative +// ssh-agent process using the sample server. 
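The testify changes above remove the io.Reader body argument from HTTPBody and the HTTPBodyContains/HTTPBodyNotContains family. A sketch of a call site written against the new signatures; the handler and route are hypothetical and the snippet would normally live in a _test.go file:

package demo

import (
	"fmt"
	"net/http"
	"net/url"
	"testing"

	"github.com/stretchr/testify/assert"
)

// healthHandler is a hypothetical handler used only for illustration.
func healthHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "ok: %s", r.URL.Query().Get("name"))
}

func TestHealthBody(t *testing.T) {
	values := url.Values{"name": []string{"world"}}

	// New signature: no io.Reader argument between values and the expected string.
	assert.HTTPBodyContains(t, healthHandler, "GET", "/health", values, "ok: world")
	assert.HTTPBodyNotContains(t, healthHandler, "GET", "/health", values, "error")
}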
+// +// References: +// [PROTOCOL.agent]: https://tools.ietf.org/html/draft-miller-ssh-agent-00 +package agent // import "golang.org/x/crypto/ssh/agent" + +import ( + "bytes" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "encoding/base64" + "encoding/binary" + "errors" + "fmt" + "io" + "math/big" + "sync" + + "crypto" + "golang.org/x/crypto/ed25519" + "golang.org/x/crypto/ssh" +) + +// SignatureFlags represent additional flags that can be passed to the signature +// requests an defined in [PROTOCOL.agent] section 4.5.1. +type SignatureFlags uint32 + +// SignatureFlag values as defined in [PROTOCOL.agent] section 5.3. +const ( + SignatureFlagReserved SignatureFlags = 1 << iota + SignatureFlagRsaSha256 + SignatureFlagRsaSha512 +) + +// Agent represents the capabilities of an ssh-agent. +type Agent interface { + // List returns the identities known to the agent. + List() ([]*Key, error) + + // Sign has the agent sign the data using a protocol 2 key as defined + // in [PROTOCOL.agent] section 2.6.2. + Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) + + // Add adds a private key to the agent. + Add(key AddedKey) error + + // Remove removes all identities with the given public key. + Remove(key ssh.PublicKey) error + + // RemoveAll removes all identities. + RemoveAll() error + + // Lock locks the agent. Sign and Remove will fail, and List will empty an empty list. + Lock(passphrase []byte) error + + // Unlock undoes the effect of Lock + Unlock(passphrase []byte) error + + // Signers returns signers for all the known keys. + Signers() ([]ssh.Signer, error) +} + +type ExtendedAgent interface { + Agent + + // SignWithFlags signs like Sign, but allows for additional flags to be sent/received + SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error) + + // Extension processes a custom extension request. Standard-compliant agents are not + // required to support any extensions, but this method allows agents to implement + // vendor-specific methods or add experimental features. See [PROTOCOL.agent] section 4.7. + // If agent extensions are unsupported entirely this method MUST return an + // ErrExtensionUnsupported error. Similarly, if just the specific extensionType in + // the request is unsupported by the agent then ErrExtensionUnsupported MUST be + // returned. + // + // In the case of success, since [PROTOCOL.agent] section 4.7 specifies that the contents + // of the response are unspecified (including the type of the message), the complete + // response will be returned as a []byte slice, including the "type" byte of the message. + Extension(extensionType string, contents []byte) ([]byte, error) +} + +// ConstraintExtension describes an optional constraint defined by users. +type ConstraintExtension struct { + // ExtensionName consist of a UTF-8 string suffixed by the + // implementation domain following the naming scheme defined + // in Section 4.2 of [RFC4251], e.g. "foo@example.com". + ExtensionName string + // ExtensionDetails contains the actual content of the extended + // constraint. + ExtensionDetails []byte +} + +// AddedKey describes an SSH key to be added to an Agent. +type AddedKey struct { + // PrivateKey must be a *rsa.PrivateKey, *dsa.PrivateKey, + // ed25519.PrivateKey or *ecdsa.PrivateKey, which will be inserted into the + // agent. + PrivateKey interface{} + // Certificate, if not nil, is communicated to the agent and will be + // stored with the key. 
+ Certificate *ssh.Certificate + // Comment is an optional, free-form string. + Comment string + // LifetimeSecs, if not zero, is the number of seconds that the + // agent will store the key for. + LifetimeSecs uint32 + // ConfirmBeforeUse, if true, requests that the agent confirm with the + // user before each use of this key. + ConfirmBeforeUse bool + // ConstraintExtensions are the experimental or private-use constraints + // defined by users. + ConstraintExtensions []ConstraintExtension +} + +// See [PROTOCOL.agent], section 3. +const ( + agentRequestV1Identities = 1 + agentRemoveAllV1Identities = 9 + + // 3.2 Requests from client to agent for protocol 2 key operations + agentAddIdentity = 17 + agentRemoveIdentity = 18 + agentRemoveAllIdentities = 19 + agentAddIDConstrained = 25 + + // 3.3 Key-type independent requests from client to agent + agentAddSmartcardKey = 20 + agentRemoveSmartcardKey = 21 + agentLock = 22 + agentUnlock = 23 + agentAddSmartcardKeyConstrained = 26 + + // 3.7 Key constraint identifiers + agentConstrainLifetime = 1 + agentConstrainConfirm = 2 + agentConstrainExtension = 3 +) + +// maxAgentResponseBytes is the maximum agent reply size that is accepted. This +// is a sanity check, not a limit in the spec. +const maxAgentResponseBytes = 16 << 20 + +// Agent messages: +// These structures mirror the wire format of the corresponding ssh agent +// messages found in [PROTOCOL.agent]. + +// 3.4 Generic replies from agent to client +const agentFailure = 5 + +type failureAgentMsg struct{} + +const agentSuccess = 6 + +type successAgentMsg struct{} + +// See [PROTOCOL.agent], section 2.5.2. +const agentRequestIdentities = 11 + +type requestIdentitiesAgentMsg struct{} + +// See [PROTOCOL.agent], section 2.5.2. +const agentIdentitiesAnswer = 12 + +type identitiesAnswerAgentMsg struct { + NumKeys uint32 `sshtype:"12"` + Keys []byte `ssh:"rest"` +} + +// See [PROTOCOL.agent], section 2.6.2. +const agentSignRequest = 13 + +type signRequestAgentMsg struct { + KeyBlob []byte `sshtype:"13"` + Data []byte + Flags uint32 +} + +// See [PROTOCOL.agent], section 2.6.2. + +// 3.6 Replies from agent to client for protocol 2 key operations +const agentSignResponse = 14 + +type signResponseAgentMsg struct { + SigBlob []byte `sshtype:"14"` +} + +type publicKey struct { + Format string + Rest []byte `ssh:"rest"` +} + +// 3.7 Key constraint identifiers +type constrainLifetimeAgentMsg struct { + LifetimeSecs uint32 `sshtype:"1"` +} + +type constrainExtensionAgentMsg struct { + ExtensionName string `sshtype:"3"` + ExtensionDetails []byte + + // Rest is a field used for parsing, not part of message + Rest []byte `ssh:"rest"` +} + +// See [PROTOCOL.agent], section 4.7 +const agentExtension = 27 +const agentExtensionFailure = 28 + +// ErrExtensionUnsupported indicates that an extension defined in +// [PROTOCOL.agent] section 4.7 is unsupported by the agent. Specifically this +// error indicates that the agent returned a standard SSH_AGENT_FAILURE message +// as the result of a SSH_AGENTC_EXTENSION request. Note that the protocol +// specification (and therefore this error) does not distinguish between a +// specific extension being unsupported and extensions being unsupported entirely. +var ErrExtensionUnsupported = errors.New("agent: extension unsupported") + +type extensionAgentMsg struct { + ExtensionType string `sshtype:"27"` + Contents []byte +} + +// Key represents a protocol 2 public key as defined in +// [PROTOCOL.agent], section 2.5.2. 
+type Key struct { + Format string + Blob []byte + Comment string +} + +func clientErr(err error) error { + return fmt.Errorf("agent: client error: %v", err) +} + +// String returns the storage form of an agent key with the format, base64 +// encoded serialized key, and the comment if it is not empty. +func (k *Key) String() string { + s := string(k.Format) + " " + base64.StdEncoding.EncodeToString(k.Blob) + + if k.Comment != "" { + s += " " + k.Comment + } + + return s +} + +// Type returns the public key type. +func (k *Key) Type() string { + return k.Format +} + +// Marshal returns key blob to satisfy the ssh.PublicKey interface. +func (k *Key) Marshal() []byte { + return k.Blob +} + +// Verify satisfies the ssh.PublicKey interface. +func (k *Key) Verify(data []byte, sig *ssh.Signature) error { + pubKey, err := ssh.ParsePublicKey(k.Blob) + if err != nil { + return fmt.Errorf("agent: bad public key: %v", err) + } + return pubKey.Verify(data, sig) +} + +type wireKey struct { + Format string + Rest []byte `ssh:"rest"` +} + +func parseKey(in []byte) (out *Key, rest []byte, err error) { + var record struct { + Blob []byte + Comment string + Rest []byte `ssh:"rest"` + } + + if err := ssh.Unmarshal(in, &record); err != nil { + return nil, nil, err + } + + var wk wireKey + if err := ssh.Unmarshal(record.Blob, &wk); err != nil { + return nil, nil, err + } + + return &Key{ + Format: wk.Format, + Blob: record.Blob, + Comment: record.Comment, + }, record.Rest, nil +} + +// client is a client for an ssh-agent process. +type client struct { + // conn is typically a *net.UnixConn + conn io.ReadWriter + // mu is used to prevent concurrent access to the agent + mu sync.Mutex +} + +// NewClient returns an Agent that talks to an ssh-agent process over +// the given connection. +func NewClient(rw io.ReadWriter) ExtendedAgent { + return &client{conn: rw} +} + +// call sends an RPC to the agent. On success, the reply is +// unmarshaled into reply and replyType is set to the first byte of +// the reply, which contains the type of the message. +func (c *client) call(req []byte) (reply interface{}, err error) { + buf, err := c.callRaw(req) + if err != nil { + return nil, err + } + reply, err = unmarshal(buf) + if err != nil { + return nil, clientErr(err) + } + return reply, nil +} + +// callRaw sends an RPC to the agent. On success, the raw +// bytes of the response are returned; no unmarshalling is +// performed on the response. 
+func (c *client) callRaw(req []byte) (reply []byte, err error) { + c.mu.Lock() + defer c.mu.Unlock() + + msg := make([]byte, 4+len(req)) + binary.BigEndian.PutUint32(msg, uint32(len(req))) + copy(msg[4:], req) + if _, err = c.conn.Write(msg); err != nil { + return nil, clientErr(err) + } + + var respSizeBuf [4]byte + if _, err = io.ReadFull(c.conn, respSizeBuf[:]); err != nil { + return nil, clientErr(err) + } + respSize := binary.BigEndian.Uint32(respSizeBuf[:]) + if respSize > maxAgentResponseBytes { + return nil, clientErr(errors.New("response too large")) + } + + buf := make([]byte, respSize) + if _, err = io.ReadFull(c.conn, buf); err != nil { + return nil, clientErr(err) + } + return buf, nil +} + +func (c *client) simpleCall(req []byte) error { + resp, err := c.call(req) + if err != nil { + return err + } + if _, ok := resp.(*successAgentMsg); ok { + return nil + } + return errors.New("agent: failure") +} + +func (c *client) RemoveAll() error { + return c.simpleCall([]byte{agentRemoveAllIdentities}) +} + +func (c *client) Remove(key ssh.PublicKey) error { + req := ssh.Marshal(&agentRemoveIdentityMsg{ + KeyBlob: key.Marshal(), + }) + return c.simpleCall(req) +} + +func (c *client) Lock(passphrase []byte) error { + req := ssh.Marshal(&agentLockMsg{ + Passphrase: passphrase, + }) + return c.simpleCall(req) +} + +func (c *client) Unlock(passphrase []byte) error { + req := ssh.Marshal(&agentUnlockMsg{ + Passphrase: passphrase, + }) + return c.simpleCall(req) +} + +// List returns the identities known to the agent. +func (c *client) List() ([]*Key, error) { + // see [PROTOCOL.agent] section 2.5.2. + req := []byte{agentRequestIdentities} + + msg, err := c.call(req) + if err != nil { + return nil, err + } + + switch msg := msg.(type) { + case *identitiesAnswerAgentMsg: + if msg.NumKeys > maxAgentResponseBytes/8 { + return nil, errors.New("agent: too many keys in agent reply") + } + keys := make([]*Key, msg.NumKeys) + data := msg.Keys + for i := uint32(0); i < msg.NumKeys; i++ { + var key *Key + var err error + if key, data, err = parseKey(data); err != nil { + return nil, err + } + keys[i] = key + } + return keys, nil + case *failureAgentMsg: + return nil, errors.New("agent: failed to list keys") + } + panic("unreachable") +} + +// Sign has the agent sign the data using a protocol 2 key as defined +// in [PROTOCOL.agent] section 2.6.2. +func (c *client) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) { + return c.SignWithFlags(key, data, 0) +} + +func (c *client) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error) { + req := ssh.Marshal(signRequestAgentMsg{ + KeyBlob: key.Marshal(), + Data: data, + Flags: uint32(flags), + }) + + msg, err := c.call(req) + if err != nil { + return nil, err + } + + switch msg := msg.(type) { + case *signResponseAgentMsg: + var sig ssh.Signature + if err := ssh.Unmarshal(msg.SigBlob, &sig); err != nil { + return nil, err + } + + return &sig, nil + case *failureAgentMsg: + return nil, errors.New("agent: failed to sign challenge") + } + panic("unreachable") +} + +// unmarshal parses an agent message in packet, returning the parsed +// form and the message type of packet. 
+func unmarshal(packet []byte) (interface{}, error) { + if len(packet) < 1 { + return nil, errors.New("agent: empty packet") + } + var msg interface{} + switch packet[0] { + case agentFailure: + return new(failureAgentMsg), nil + case agentSuccess: + return new(successAgentMsg), nil + case agentIdentitiesAnswer: + msg = new(identitiesAnswerAgentMsg) + case agentSignResponse: + msg = new(signResponseAgentMsg) + case agentV1IdentitiesAnswer: + msg = new(agentV1IdentityMsg) + default: + return nil, fmt.Errorf("agent: unknown type tag %d", packet[0]) + } + if err := ssh.Unmarshal(packet, msg); err != nil { + return nil, err + } + return msg, nil +} + +type rsaKeyMsg struct { + Type string `sshtype:"17|25"` + N *big.Int + E *big.Int + D *big.Int + Iqmp *big.Int // IQMP = Inverse Q Mod P + P *big.Int + Q *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type dsaKeyMsg struct { + Type string `sshtype:"17|25"` + P *big.Int + Q *big.Int + G *big.Int + Y *big.Int + X *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type ecdsaKeyMsg struct { + Type string `sshtype:"17|25"` + Curve string + KeyBytes []byte + D *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type ed25519KeyMsg struct { + Type string `sshtype:"17|25"` + Pub []byte + Priv []byte + Comments string + Constraints []byte `ssh:"rest"` +} + +// Insert adds a private key to the agent. +func (c *client) insertKey(s interface{}, comment string, constraints []byte) error { + var req []byte + switch k := s.(type) { + case *rsa.PrivateKey: + if len(k.Primes) != 2 { + return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes)) + } + k.Precompute() + req = ssh.Marshal(rsaKeyMsg{ + Type: ssh.KeyAlgoRSA, + N: k.N, + E: big.NewInt(int64(k.E)), + D: k.D, + Iqmp: k.Precomputed.Qinv, + P: k.Primes[0], + Q: k.Primes[1], + Comments: comment, + Constraints: constraints, + }) + case *dsa.PrivateKey: + req = ssh.Marshal(dsaKeyMsg{ + Type: ssh.KeyAlgoDSA, + P: k.P, + Q: k.Q, + G: k.G, + Y: k.Y, + X: k.X, + Comments: comment, + Constraints: constraints, + }) + case *ecdsa.PrivateKey: + nistID := fmt.Sprintf("nistp%d", k.Params().BitSize) + req = ssh.Marshal(ecdsaKeyMsg{ + Type: "ecdsa-sha2-" + nistID, + Curve: nistID, + KeyBytes: elliptic.Marshal(k.Curve, k.X, k.Y), + D: k.D, + Comments: comment, + Constraints: constraints, + }) + case ed25519.PrivateKey: + req = ssh.Marshal(ed25519KeyMsg{ + Type: ssh.KeyAlgoED25519, + Pub: []byte(k)[32:], + Priv: []byte(k), + Comments: comment, + Constraints: constraints, + }) + // This function originally supported only *ed25519.PrivateKey, however the + // general idiom is to pass ed25519.PrivateKey by value, not by pointer. + // We still support the pointer variant for backwards compatibility. + case *ed25519.PrivateKey: + req = ssh.Marshal(ed25519KeyMsg{ + Type: ssh.KeyAlgoED25519, + Pub: []byte(*k)[32:], + Priv: []byte(*k), + Comments: comment, + Constraints: constraints, + }) + default: + return fmt.Errorf("agent: unsupported key type %T", s) + } + + // if constraints are present then the message type needs to be changed. 
+ if len(constraints) != 0 { + req[0] = agentAddIDConstrained + } + + resp, err := c.call(req) + if err != nil { + return err + } + if _, ok := resp.(*successAgentMsg); ok { + return nil + } + return errors.New("agent: failure") +} + +type rsaCertMsg struct { + Type string `sshtype:"17|25"` + CertBytes []byte + D *big.Int + Iqmp *big.Int // IQMP = Inverse Q Mod P + P *big.Int + Q *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type dsaCertMsg struct { + Type string `sshtype:"17|25"` + CertBytes []byte + X *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type ecdsaCertMsg struct { + Type string `sshtype:"17|25"` + CertBytes []byte + D *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type ed25519CertMsg struct { + Type string `sshtype:"17|25"` + CertBytes []byte + Pub []byte + Priv []byte + Comments string + Constraints []byte `ssh:"rest"` +} + +// Add adds a private key to the agent. If a certificate is given, +// that certificate is added instead as public key. +func (c *client) Add(key AddedKey) error { + var constraints []byte + + if secs := key.LifetimeSecs; secs != 0 { + constraints = append(constraints, ssh.Marshal(constrainLifetimeAgentMsg{secs})...) + } + + if key.ConfirmBeforeUse { + constraints = append(constraints, agentConstrainConfirm) + } + + cert := key.Certificate + if cert == nil { + return c.insertKey(key.PrivateKey, key.Comment, constraints) + } + return c.insertCert(key.PrivateKey, cert, key.Comment, constraints) +} + +func (c *client) insertCert(s interface{}, cert *ssh.Certificate, comment string, constraints []byte) error { + var req []byte + switch k := s.(type) { + case *rsa.PrivateKey: + if len(k.Primes) != 2 { + return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes)) + } + k.Precompute() + req = ssh.Marshal(rsaCertMsg{ + Type: cert.Type(), + CertBytes: cert.Marshal(), + D: k.D, + Iqmp: k.Precomputed.Qinv, + P: k.Primes[0], + Q: k.Primes[1], + Comments: comment, + Constraints: constraints, + }) + case *dsa.PrivateKey: + req = ssh.Marshal(dsaCertMsg{ + Type: cert.Type(), + CertBytes: cert.Marshal(), + X: k.X, + Comments: comment, + Constraints: constraints, + }) + case *ecdsa.PrivateKey: + req = ssh.Marshal(ecdsaCertMsg{ + Type: cert.Type(), + CertBytes: cert.Marshal(), + D: k.D, + Comments: comment, + Constraints: constraints, + }) + case ed25519.PrivateKey: + req = ssh.Marshal(ed25519CertMsg{ + Type: cert.Type(), + CertBytes: cert.Marshal(), + Pub: []byte(k)[32:], + Priv: []byte(k), + Comments: comment, + Constraints: constraints, + }) + // This function originally supported only *ed25519.PrivateKey, however the + // general idiom is to pass ed25519.PrivateKey by value, not by pointer. + // We still support the pointer variant for backwards compatibility. + case *ed25519.PrivateKey: + req = ssh.Marshal(ed25519CertMsg{ + Type: cert.Type(), + CertBytes: cert.Marshal(), + Pub: []byte(*k)[32:], + Priv: []byte(*k), + Comments: comment, + Constraints: constraints, + }) + default: + return fmt.Errorf("agent: unsupported key type %T", s) + } + + // if constraints are present then the message type needs to be changed. 
+ if len(constraints) != 0 { + req[0] = agentAddIDConstrained + } + + signer, err := ssh.NewSignerFromKey(s) + if err != nil { + return err + } + if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { + return errors.New("agent: signer and cert have different public key") + } + + resp, err := c.call(req) + if err != nil { + return err + } + if _, ok := resp.(*successAgentMsg); ok { + return nil + } + return errors.New("agent: failure") +} + +// Signers provides a callback for client authentication. +func (c *client) Signers() ([]ssh.Signer, error) { + keys, err := c.List() + if err != nil { + return nil, err + } + + var result []ssh.Signer + for _, k := range keys { + result = append(result, &agentKeyringSigner{c, k}) + } + return result, nil +} + +type agentKeyringSigner struct { + agent *client + pub ssh.PublicKey +} + +func (s *agentKeyringSigner) PublicKey() ssh.PublicKey { + return s.pub +} + +func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature, error) { + // The agent has its own entropy source, so the rand argument is ignored. + return s.agent.Sign(s.pub, data) +} + +func (s *agentKeyringSigner) SignWithOpts(rand io.Reader, data []byte, opts crypto.SignerOpts) (*ssh.Signature, error) { + var flags SignatureFlags + if opts != nil { + switch opts.HashFunc() { + case crypto.SHA256: + flags = SignatureFlagRsaSha256 + case crypto.SHA512: + flags = SignatureFlagRsaSha512 + } + } + return s.agent.SignWithFlags(s.pub, data, flags) +} + +// Calls an extension method. It is up to the agent implementation as to whether or not +// any particular extension is supported and may always return an error. Because the +// type of the response is up to the implementation, this returns the bytes of the +// response and does not attempt any type of unmarshalling. +func (c *client) Extension(extensionType string, contents []byte) ([]byte, error) { + req := ssh.Marshal(extensionAgentMsg{ + ExtensionType: extensionType, + Contents: contents, + }) + buf, err := c.callRaw(req) + if err != nil { + return nil, err + } + if len(buf) == 0 { + return nil, errors.New("agent: failure; empty response") + } + // [PROTOCOL.agent] section 4.7 indicates that an SSH_AGENT_FAILURE message + // represents an agent that does not support the extension + if buf[0] == agentFailure { + return nil, ErrExtensionUnsupported + } + if buf[0] == agentExtensionFailure { + return nil, errors.New("agent: generic extension failure") + } + + return buf, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/forward.go b/vendor/golang.org/x/crypto/ssh/agent/forward.go new file mode 100644 index 000000000..fd24ba900 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/forward.go @@ -0,0 +1,103 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package agent + +import ( + "errors" + "io" + "net" + "sync" + + "golang.org/x/crypto/ssh" +) + +// RequestAgentForwarding sets up agent forwarding for the session. +// ForwardToAgent or ForwardToRemote should be called to route +// the authentication requests. +func RequestAgentForwarding(session *ssh.Session) error { + ok, err := session.SendRequest("auth-agent-req@openssh.com", true, nil) + if err != nil { + return err + } + if !ok { + return errors.New("forwarding request denied") + } + return nil +} + +// ForwardToAgent routes authentication requests to the given keyring. 
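The vendored agent client above speaks the ssh-agent protocol over any io.ReadWriter. A minimal sketch of typical use, assuming SSH_AUTH_SOCK points at a running agent; the user name and host are placeholders:

package main

import (
	"fmt"
	"log"
	"net"
	"os"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
)

func main() {
	conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ag := agent.NewClient(conn)

	// List the identities the agent currently holds.
	keys, err := ag.List()
	if err != nil {
		log.Fatal(err)
	}
	for _, k := range keys {
		fmt.Println(k.Format, k.Comment)
	}

	// Let the agent sign the SSH handshake instead of loading private keys ourselves.
	cfg := &ssh.ClientConfig{
		User:            "builder", // placeholder
		Auth:            []ssh.AuthMethod{ssh.PublicKeysCallback(ag.Signers)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // acceptable for a sketch, not for production
	}
	client, err := ssh.Dial("tcp", "example.com:22", cfg) // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	client.Close()
}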
+func ForwardToAgent(client *ssh.Client, keyring Agent) error { + channels := client.HandleChannelOpen(channelType) + if channels == nil { + return errors.New("agent: already have handler for " + channelType) + } + + go func() { + for ch := range channels { + channel, reqs, err := ch.Accept() + if err != nil { + continue + } + go ssh.DiscardRequests(reqs) + go func() { + ServeAgent(keyring, channel) + channel.Close() + }() + } + }() + return nil +} + +const channelType = "auth-agent@openssh.com" + +// ForwardToRemote routes authentication requests to the ssh-agent +// process serving on the given unix socket. +func ForwardToRemote(client *ssh.Client, addr string) error { + channels := client.HandleChannelOpen(channelType) + if channels == nil { + return errors.New("agent: already have handler for " + channelType) + } + conn, err := net.Dial("unix", addr) + if err != nil { + return err + } + conn.Close() + + go func() { + for ch := range channels { + channel, reqs, err := ch.Accept() + if err != nil { + continue + } + go ssh.DiscardRequests(reqs) + go forwardUnixSocket(channel, addr) + } + }() + return nil +} + +func forwardUnixSocket(channel ssh.Channel, addr string) { + conn, err := net.Dial("unix", addr) + if err != nil { + return + } + + var wg sync.WaitGroup + wg.Add(2) + go func() { + io.Copy(conn, channel) + conn.(*net.UnixConn).CloseWrite() + wg.Done() + }() + go func() { + io.Copy(channel, conn) + channel.CloseWrite() + wg.Done() + }() + + wg.Wait() + conn.Close() + channel.Close() +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring.go b/vendor/golang.org/x/crypto/ssh/agent/keyring.go new file mode 100644 index 000000000..c9d979430 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/keyring.go @@ -0,0 +1,241 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package agent + +import ( + "bytes" + "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "sync" + "time" + + "golang.org/x/crypto/ssh" +) + +type privKey struct { + signer ssh.Signer + comment string + expire *time.Time +} + +type keyring struct { + mu sync.Mutex + keys []privKey + + locked bool + passphrase []byte +} + +var errLocked = errors.New("agent: locked") + +// NewKeyring returns an Agent that holds keys in memory. It is safe +// for concurrent use by multiple goroutines. +func NewKeyring() Agent { + return &keyring{} +} + +// RemoveAll removes all identities. +func (r *keyring) RemoveAll() error { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return errLocked + } + + r.keys = nil + return nil +} + +// removeLocked does the actual key removal. The caller must already be holding the +// keyring mutex. +func (r *keyring) removeLocked(want []byte) error { + found := false + for i := 0; i < len(r.keys); { + if bytes.Equal(r.keys[i].signer.PublicKey().Marshal(), want) { + found = true + r.keys[i] = r.keys[len(r.keys)-1] + r.keys = r.keys[:len(r.keys)-1] + continue + } else { + i++ + } + } + + if !found { + return errors.New("agent: key not found") + } + return nil +} + +// Remove removes all identities with the given public key. +func (r *keyring) Remove(key ssh.PublicKey) error { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return errLocked + } + + return r.removeLocked(key.Marshal()) +} + +// Lock locks the agent. Sign and Remove will fail, and List will return an empty list. 
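forward.go above wires the auth-agent@openssh.com channel either to a local Agent (ForwardToAgent) or to a unix socket (ForwardToRemote). A hedged sketch of enabling forwarding on a session backed by an in-memory keyring; the ssh-add command run at the end is only illustrative:

package sshutil

import (
	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
)

// enableForwarding routes agent requests from the remote side back to the
// given keyring. client must already be connected (for example via ssh.Dial).
func enableForwarding(client *ssh.Client, keyring agent.Agent) error {
	// Serve auth-agent@openssh.com channel requests from keyring.
	if err := agent.ForwardToAgent(client, keyring); err != nil {
		return err
	}

	session, err := client.NewSession()
	if err != nil {
		return err
	}
	defer session.Close()

	// Ask the server to open agent channels for this session.
	if err := agent.RequestAgentForwarding(session); err != nil {
		return err
	}

	// Anything run in this session can now reach the forwarded agent.
	return session.Run("ssh-add -l")
}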
+func (r *keyring) Lock(passphrase []byte) error { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return errLocked + } + + r.locked = true + r.passphrase = passphrase + return nil +} + +// Unlock undoes the effect of Lock +func (r *keyring) Unlock(passphrase []byte) error { + r.mu.Lock() + defer r.mu.Unlock() + if !r.locked { + return errors.New("agent: not locked") + } + if 1 != subtle.ConstantTimeCompare(passphrase, r.passphrase) { + return fmt.Errorf("agent: incorrect passphrase") + } + + r.locked = false + r.passphrase = nil + return nil +} + +// expireKeysLocked removes expired keys from the keyring. If a key was added +// with a lifetimesecs contraint and seconds >= lifetimesecs seconds have +// ellapsed, it is removed. The caller *must* be holding the keyring mutex. +func (r *keyring) expireKeysLocked() { + for _, k := range r.keys { + if k.expire != nil && time.Now().After(*k.expire) { + r.removeLocked(k.signer.PublicKey().Marshal()) + } + } +} + +// List returns the identities known to the agent. +func (r *keyring) List() ([]*Key, error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + // section 2.7: locked agents return empty. + return nil, nil + } + + r.expireKeysLocked() + var ids []*Key + for _, k := range r.keys { + pub := k.signer.PublicKey() + ids = append(ids, &Key{ + Format: pub.Type(), + Blob: pub.Marshal(), + Comment: k.comment}) + } + return ids, nil +} + +// Insert adds a private key to the keyring. If a certificate +// is given, that certificate is added as public key. Note that +// any constraints given are ignored. +func (r *keyring) Add(key AddedKey) error { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return errLocked + } + signer, err := ssh.NewSignerFromKey(key.PrivateKey) + + if err != nil { + return err + } + + if cert := key.Certificate; cert != nil { + signer, err = ssh.NewCertSigner(cert, signer) + if err != nil { + return err + } + } + + p := privKey{ + signer: signer, + comment: key.Comment, + } + + if key.LifetimeSecs > 0 { + t := time.Now().Add(time.Duration(key.LifetimeSecs) * time.Second) + p.expire = &t + } + + r.keys = append(r.keys, p) + + return nil +} + +// Sign returns a signature for the data. +func (r *keyring) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) { + return r.SignWithFlags(key, data, 0) +} + +func (r *keyring) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return nil, errLocked + } + + r.expireKeysLocked() + wanted := key.Marshal() + for _, k := range r.keys { + if bytes.Equal(k.signer.PublicKey().Marshal(), wanted) { + if flags == 0 { + return k.signer.Sign(rand.Reader, data) + } else { + if algorithmSigner, ok := k.signer.(ssh.AlgorithmSigner); !ok { + return nil, fmt.Errorf("agent: signature does not support non-default signature algorithm: %T", k.signer) + } else { + var algorithm string + switch flags { + case SignatureFlagRsaSha256: + algorithm = ssh.SigAlgoRSASHA2256 + case SignatureFlagRsaSha512: + algorithm = ssh.SigAlgoRSASHA2512 + default: + return nil, fmt.Errorf("agent: unsupported signature flags: %d", flags) + } + return algorithmSigner.SignWithAlgorithm(rand.Reader, data, algorithm) + } + } + } + } + return nil, errors.New("not found") +} + +// Signers returns signers for all the known keys. 
+func (r *keyring) Signers() ([]ssh.Signer, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.locked {
+		return nil, errLocked
+	}
+
+	r.expireKeysLocked()
+	s := make([]ssh.Signer, 0, len(r.keys))
+	for _, k := range r.keys {
+		s = append(s, k.signer)
+	}
+	return s, nil
+}
+
+// The keyring does not support any extensions.
+func (r *keyring) Extension(extensionType string, contents []byte) ([]byte, error) {
+	return nil, ErrExtensionUnsupported
+}
diff --git a/vendor/golang.org/x/crypto/ssh/agent/server.go b/vendor/golang.org/x/crypto/ssh/agent/server.go
new file mode 100644
index 000000000..6e7a1e02f
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/agent/server.go
@@ -0,0 +1,570 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package agent
+
+import (
+	"crypto/dsa"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rsa"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"math/big"
+
+	"golang.org/x/crypto/ed25519"
+	"golang.org/x/crypto/ssh"
+)
+
+// Server wraps an Agent and uses it to implement the agent side of
+// the SSH agent wire protocol.
+type server struct {
+	agent Agent
+}
+
+func (s *server) processRequestBytes(reqData []byte) []byte {
+	rep, err := s.processRequest(reqData)
+	if err != nil {
+		if err != errLocked {
+			// TODO(hanwen): provide better logging interface?
+			log.Printf("agent %d: %v", reqData[0], err)
+		}
+		return []byte{agentFailure}
+	}
+
+	if err == nil && rep == nil {
+		return []byte{agentSuccess}
+	}
+
+	return ssh.Marshal(rep)
+}
+
+func marshalKey(k *Key) []byte {
+	var record struct {
+		Blob    []byte
+		Comment string
+	}
+	record.Blob = k.Marshal()
+	record.Comment = k.Comment
+
+	return ssh.Marshal(&record)
+}
+
+// See [PROTOCOL.agent], section 2.5.1.
+const agentV1IdentitiesAnswer = 2 + +type agentV1IdentityMsg struct { + Numkeys uint32 `sshtype:"2"` +} + +type agentRemoveIdentityMsg struct { + KeyBlob []byte `sshtype:"18"` +} + +type agentLockMsg struct { + Passphrase []byte `sshtype:"22"` +} + +type agentUnlockMsg struct { + Passphrase []byte `sshtype:"23"` +} + +func (s *server) processRequest(data []byte) (interface{}, error) { + switch data[0] { + case agentRequestV1Identities: + return &agentV1IdentityMsg{0}, nil + + case agentRemoveAllV1Identities: + return nil, nil + + case agentRemoveIdentity: + var req agentRemoveIdentityMsg + if err := ssh.Unmarshal(data, &req); err != nil { + return nil, err + } + + var wk wireKey + if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil { + return nil, err + } + + return nil, s.agent.Remove(&Key{Format: wk.Format, Blob: req.KeyBlob}) + + case agentRemoveAllIdentities: + return nil, s.agent.RemoveAll() + + case agentLock: + var req agentLockMsg + if err := ssh.Unmarshal(data, &req); err != nil { + return nil, err + } + + return nil, s.agent.Lock(req.Passphrase) + + case agentUnlock: + var req agentUnlockMsg + if err := ssh.Unmarshal(data, &req); err != nil { + return nil, err + } + return nil, s.agent.Unlock(req.Passphrase) + + case agentSignRequest: + var req signRequestAgentMsg + if err := ssh.Unmarshal(data, &req); err != nil { + return nil, err + } + + var wk wireKey + if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil { + return nil, err + } + + k := &Key{ + Format: wk.Format, + Blob: req.KeyBlob, + } + + var sig *ssh.Signature + var err error + if extendedAgent, ok := s.agent.(ExtendedAgent); ok { + sig, err = extendedAgent.SignWithFlags(k, req.Data, SignatureFlags(req.Flags)) + } else { + sig, err = s.agent.Sign(k, req.Data) + } + + if err != nil { + return nil, err + } + return &signResponseAgentMsg{SigBlob: ssh.Marshal(sig)}, nil + + case agentRequestIdentities: + keys, err := s.agent.List() + if err != nil { + return nil, err + } + + rep := identitiesAnswerAgentMsg{ + NumKeys: uint32(len(keys)), + } + for _, k := range keys { + rep.Keys = append(rep.Keys, marshalKey(k)...) + } + return rep, nil + + case agentAddIDConstrained, agentAddIdentity: + return nil, s.insertIdentity(data) + + case agentExtension: + // Return a stub object where the whole contents of the response gets marshaled. + var responseStub struct { + Rest []byte `ssh:"rest"` + } + + if extendedAgent, ok := s.agent.(ExtendedAgent); !ok { + // If this agent doesn't implement extensions, [PROTOCOL.agent] section 4.7 + // requires that we return a standard SSH_AGENT_FAILURE message. + responseStub.Rest = []byte{agentFailure} + } else { + var req extensionAgentMsg + if err := ssh.Unmarshal(data, &req); err != nil { + return nil, err + } + res, err := extendedAgent.Extension(req.ExtensionType, req.Contents) + if err != nil { + // If agent extensions are unsupported, return a standard SSH_AGENT_FAILURE + // message as required by [PROTOCOL.agent] section 4.7. + if err == ErrExtensionUnsupported { + responseStub.Rest = []byte{agentFailure} + } else { + // As the result of any other error processing an extension request, + // [PROTOCOL.agent] section 4.7 requires that we return a + // SSH_AGENT_EXTENSION_FAILURE code. 
+ responseStub.Rest = []byte{agentExtensionFailure} + } + } else { + if len(res) == 0 { + return nil, nil + } + responseStub.Rest = res + } + } + + return responseStub, nil + } + + return nil, fmt.Errorf("unknown opcode %d", data[0]) +} + +func parseConstraints(constraints []byte) (lifetimeSecs uint32, confirmBeforeUse bool, extensions []ConstraintExtension, err error) { + for len(constraints) != 0 { + switch constraints[0] { + case agentConstrainLifetime: + lifetimeSecs = binary.BigEndian.Uint32(constraints[1:5]) + constraints = constraints[5:] + case agentConstrainConfirm: + confirmBeforeUse = true + constraints = constraints[1:] + case agentConstrainExtension: + var msg constrainExtensionAgentMsg + if err = ssh.Unmarshal(constraints, &msg); err != nil { + return 0, false, nil, err + } + extensions = append(extensions, ConstraintExtension{ + ExtensionName: msg.ExtensionName, + ExtensionDetails: msg.ExtensionDetails, + }) + constraints = msg.Rest + default: + return 0, false, nil, fmt.Errorf("unknown constraint type: %d", constraints[0]) + } + } + return +} + +func setConstraints(key *AddedKey, constraintBytes []byte) error { + lifetimeSecs, confirmBeforeUse, constraintExtensions, err := parseConstraints(constraintBytes) + if err != nil { + return err + } + + key.LifetimeSecs = lifetimeSecs + key.ConfirmBeforeUse = confirmBeforeUse + key.ConstraintExtensions = constraintExtensions + return nil +} + +func parseRSAKey(req []byte) (*AddedKey, error) { + var k rsaKeyMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + if k.E.BitLen() > 30 { + return nil, errors.New("agent: RSA public exponent too large") + } + priv := &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + E: int(k.E.Int64()), + N: k.N, + }, + D: k.D, + Primes: []*big.Int{k.P, k.Q}, + } + priv.Precompute() + + addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments} + if err := setConstraints(addedKey, k.Constraints); err != nil { + return nil, err + } + return addedKey, nil +} + +func parseEd25519Key(req []byte) (*AddedKey, error) { + var k ed25519KeyMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + priv := ed25519.PrivateKey(k.Priv) + + addedKey := &AddedKey{PrivateKey: &priv, Comment: k.Comments} + if err := setConstraints(addedKey, k.Constraints); err != nil { + return nil, err + } + return addedKey, nil +} + +func parseDSAKey(req []byte) (*AddedKey, error) { + var k dsaKeyMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + priv := &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: k.P, + Q: k.Q, + G: k.G, + }, + Y: k.Y, + }, + X: k.X, + } + + addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments} + if err := setConstraints(addedKey, k.Constraints); err != nil { + return nil, err + } + return addedKey, nil +} + +func unmarshalECDSA(curveName string, keyBytes []byte, privScalar *big.Int) (priv *ecdsa.PrivateKey, err error) { + priv = &ecdsa.PrivateKey{ + D: privScalar, + } + + switch curveName { + case "nistp256": + priv.Curve = elliptic.P256() + case "nistp384": + priv.Curve = elliptic.P384() + case "nistp521": + priv.Curve = elliptic.P521() + default: + return nil, fmt.Errorf("agent: unknown curve %q", curveName) + } + + priv.X, priv.Y = elliptic.Unmarshal(priv.Curve, keyBytes) + if priv.X == nil || priv.Y == nil { + return nil, errors.New("agent: point not on curve") + } + + return priv, nil +} + +func parseEd25519Cert(req []byte) (*AddedKey, error) { + var k ed25519CertMsg + if err := ssh.Unmarshal(req, &k); err != 
nil { + return nil, err + } + pubKey, err := ssh.ParsePublicKey(k.CertBytes) + if err != nil { + return nil, err + } + priv := ed25519.PrivateKey(k.Priv) + cert, ok := pubKey.(*ssh.Certificate) + if !ok { + return nil, errors.New("agent: bad ED25519 certificate") + } + + addedKey := &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments} + if err := setConstraints(addedKey, k.Constraints); err != nil { + return nil, err + } + return addedKey, nil +} + +func parseECDSAKey(req []byte) (*AddedKey, error) { + var k ecdsaKeyMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + + priv, err := unmarshalECDSA(k.Curve, k.KeyBytes, k.D) + if err != nil { + return nil, err + } + + addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments} + if err := setConstraints(addedKey, k.Constraints); err != nil { + return nil, err + } + return addedKey, nil +} + +func parseRSACert(req []byte) (*AddedKey, error) { + var k rsaCertMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + + pubKey, err := ssh.ParsePublicKey(k.CertBytes) + if err != nil { + return nil, err + } + + cert, ok := pubKey.(*ssh.Certificate) + if !ok { + return nil, errors.New("agent: bad RSA certificate") + } + + // An RSA publickey as marshaled by rsaPublicKey.Marshal() in keys.go + var rsaPub struct { + Name string + E *big.Int + N *big.Int + } + if err := ssh.Unmarshal(cert.Key.Marshal(), &rsaPub); err != nil { + return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err) + } + + if rsaPub.E.BitLen() > 30 { + return nil, errors.New("agent: RSA public exponent too large") + } + + priv := rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + E: int(rsaPub.E.Int64()), + N: rsaPub.N, + }, + D: k.D, + Primes: []*big.Int{k.Q, k.P}, + } + priv.Precompute() + + addedKey := &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments} + if err := setConstraints(addedKey, k.Constraints); err != nil { + return nil, err + } + return addedKey, nil +} + +func parseDSACert(req []byte) (*AddedKey, error) { + var k dsaCertMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + pubKey, err := ssh.ParsePublicKey(k.CertBytes) + if err != nil { + return nil, err + } + cert, ok := pubKey.(*ssh.Certificate) + if !ok { + return nil, errors.New("agent: bad DSA certificate") + } + + // A DSA publickey as marshaled by dsaPublicKey.Marshal() in keys.go + var w struct { + Name string + P, Q, G, Y *big.Int + } + if err := ssh.Unmarshal(cert.Key.Marshal(), &w); err != nil { + return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err) + } + + priv := &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: w.P, + Q: w.Q, + G: w.G, + }, + Y: w.Y, + }, + X: k.X, + } + + addedKey := &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments} + if err := setConstraints(addedKey, k.Constraints); err != nil { + return nil, err + } + return addedKey, nil +} + +func parseECDSACert(req []byte) (*AddedKey, error) { + var k ecdsaCertMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + + pubKey, err := ssh.ParsePublicKey(k.CertBytes) + if err != nil { + return nil, err + } + cert, ok := pubKey.(*ssh.Certificate) + if !ok { + return nil, errors.New("agent: bad ECDSA certificate") + } + + // An ECDSA publickey as marshaled by ecdsaPublicKey.Marshal() in keys.go + var ecdsaPub struct { + Name string + ID string + Key []byte + } + if err := ssh.Unmarshal(cert.Key.Marshal(), &ecdsaPub); err != nil { + return nil, 
err + } + + priv, err := unmarshalECDSA(ecdsaPub.ID, ecdsaPub.Key, k.D) + if err != nil { + return nil, err + } + + addedKey := &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments} + if err := setConstraints(addedKey, k.Constraints); err != nil { + return nil, err + } + return addedKey, nil +} + +func (s *server) insertIdentity(req []byte) error { + var record struct { + Type string `sshtype:"17|25"` + Rest []byte `ssh:"rest"` + } + + if err := ssh.Unmarshal(req, &record); err != nil { + return err + } + + var addedKey *AddedKey + var err error + + switch record.Type { + case ssh.KeyAlgoRSA: + addedKey, err = parseRSAKey(req) + case ssh.KeyAlgoDSA: + addedKey, err = parseDSAKey(req) + case ssh.KeyAlgoECDSA256, ssh.KeyAlgoECDSA384, ssh.KeyAlgoECDSA521: + addedKey, err = parseECDSAKey(req) + case ssh.KeyAlgoED25519: + addedKey, err = parseEd25519Key(req) + case ssh.CertAlgoRSAv01: + addedKey, err = parseRSACert(req) + case ssh.CertAlgoDSAv01: + addedKey, err = parseDSACert(req) + case ssh.CertAlgoECDSA256v01, ssh.CertAlgoECDSA384v01, ssh.CertAlgoECDSA521v01: + addedKey, err = parseECDSACert(req) + case ssh.CertAlgoED25519v01: + addedKey, err = parseEd25519Cert(req) + default: + return fmt.Errorf("agent: not implemented: %q", record.Type) + } + + if err != nil { + return err + } + return s.agent.Add(*addedKey) +} + +// ServeAgent serves the agent protocol on the given connection. It +// returns when an I/O error occurs. +func ServeAgent(agent Agent, c io.ReadWriter) error { + s := &server{agent} + + var length [4]byte + for { + if _, err := io.ReadFull(c, length[:]); err != nil { + return err + } + l := binary.BigEndian.Uint32(length[:]) + if l == 0 { + return fmt.Errorf("agent: request size is 0") + } + if l > maxAgentResponseBytes { + // We also cap requests. 
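+			// This bounds the request buffer allocated below for untrusted input.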
+ return fmt.Errorf("agent: request too large: %d", l) + } + + req := make([]byte, l) + if _, err := io.ReadFull(c, req); err != nil { + return err + } + + repData := s.processRequestBytes(req) + if len(repData) > maxAgentResponseBytes { + return fmt.Errorf("agent: reply too large: %d bytes", len(repData)) + } + + binary.BigEndian.PutUint32(length[:], uint32(len(repData))) + if _, err := c.Write(length[:]); err != nil { + return err + } + if _, err := c.Write(repData); err != nil { + return err + } + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index e2e83df9b..a44d0e88d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -84,14 +84,14 @@ github.com/containers/buildah/pkg/secrets github.com/containers/buildah/pkg/supplemented github.com/containers/buildah/pkg/umask github.com/containers/buildah/util -# github.com/containers/common v0.12.0 +# github.com/containers/common v0.13.0 github.com/containers/common/pkg/apparmor github.com/containers/common/pkg/auth github.com/containers/common/pkg/capabilities github.com/containers/common/pkg/cgroupv2 github.com/containers/common/pkg/config github.com/containers/common/pkg/sysinfo -# github.com/containers/conmon v2.0.16+incompatible +# github.com/containers/conmon v2.0.17+incompatible github.com/containers/conmon/runner/config # github.com/containers/image/v5 v5.4.5-0.20200529084758-46b2ee6aebb0 github.com/containers/image/v5/copy @@ -154,7 +154,7 @@ github.com/containers/psgo/internal/dev github.com/containers/psgo/internal/host github.com/containers/psgo/internal/proc github.com/containers/psgo/internal/process -# github.com/containers/storage v1.20.1 +# github.com/containers/storage v1.20.2 github.com/containers/storage github.com/containers/storage/drivers github.com/containers/storage/drivers/aufs @@ -322,9 +322,9 @@ github.com/imdario/mergo github.com/inconshreveable/mousetrap # github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 github.com/ishidawataru/sctp -# github.com/json-iterator/go v1.1.9 +# github.com/json-iterator/go v1.1.10 github.com/json-iterator/go -# github.com/klauspost/compress v1.10.6 +# github.com/klauspost/compress v1.10.7 github.com/klauspost/compress/flate github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 @@ -480,7 +480,7 @@ github.com/rootless-containers/rootlesskit/pkg/port/builtin/parent/udp/udpproxy github.com/rootless-containers/rootlesskit/pkg/port/portutil # github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8 github.com/safchain/ethtool -# github.com/seccomp/containers-golang v0.4.1 +# github.com/seccomp/containers-golang v0.5.0 github.com/seccomp/containers-golang # github.com/seccomp/libseccomp-golang v0.9.1 github.com/seccomp/libseccomp-golang @@ -491,7 +491,7 @@ github.com/sirupsen/logrus/hooks/syslog github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 github.com/spf13/pflag -# github.com/stretchr/testify v1.6.0 +# github.com/stretchr/testify v1.6.1 github.com/stretchr/testify/assert github.com/stretchr/testify/require # github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 @@ -575,6 +575,7 @@ golang.org/x/crypto/openpgp/s2k golang.org/x/crypto/pbkdf2 golang.org/x/crypto/poly1305 golang.org/x/crypto/ssh +golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/terminal # golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 |