Diffstat (limited to 'vendor')
-rw-r--r--  vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod | 2
-rw-r--r--  vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum | 4
-rw-r--r--  vendor/github.com/containers/common/libimage/runtime.go | 5
-rw-r--r--  vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go | 13
-rw-r--r--  vendor/github.com/containers/common/libnetwork/cni/cni_exec.go | 1
-rw-r--r--  vendor/github.com/containers/common/libnetwork/cni/cni_types.go | 1
-rw-r--r--  vendor/github.com/containers/common/libnetwork/cni/config.go | 5
-rw-r--r--  vendor/github.com/containers/common/libnetwork/cni/network.go | 1
-rw-r--r--  vendor/github.com/containers/common/libnetwork/cni/run.go | 1
-rw-r--r--  vendor/github.com/containers/common/libnetwork/internal/util/bridge.go | 4
-rw-r--r--  vendor/github.com/containers/common/libnetwork/internal/util/validate.go | 2
-rw-r--r--  vendor/github.com/containers/common/libnetwork/netavark/config.go | 4
-rw-r--r--  vendor/github.com/containers/common/libnetwork/netavark/const.go | 1
-rw-r--r--  vendor/github.com/containers/common/libnetwork/netavark/exec.go | 1
-rw-r--r--  vendor/github.com/containers/common/libnetwork/netavark/ipam.go | 3
-rw-r--r--  vendor/github.com/containers/common/libnetwork/netavark/network.go | 1
-rw-r--r--  vendor/github.com/containers/common/libnetwork/netavark/run.go | 11
-rw-r--r--  vendor/github.com/containers/common/libnetwork/network/interface.go | 7
-rw-r--r--  vendor/github.com/containers/common/libnetwork/types/const.go | 1
-rw-r--r--  vendor/github.com/containers/common/libnetwork/util/filters.go | 2
-rw-r--r--  vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/auth/auth.go | 50
-rw-r--r--  vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/chown/chown_unix.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config.go | 4
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config_local.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config_remote.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config_unsupported.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/config/containers.conf | 6
-rw-r--r--  vendor/github.com/containers/common/pkg/config/default.go | 2
-rw-r--r--  vendor/github.com/containers/common/pkg/config/default_linux.go | 10
-rw-r--r--  vendor/github.com/containers/common/pkg/config/default_unsupported.go | 12
-rw-r--r--  vendor/github.com/containers/common/pkg/config/default_windows.go | 12
-rw-r--r--  vendor/github.com/containers/common/pkg/config/nosystemd.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/config/systemd.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/parse/parse_unix.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/retry/retry_unsupported.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/default_linux.go | 10
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/errno_list.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/filter.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/seccomp.json | 10
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/supported.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/seccomp/validate.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/signal/signal_linux.go | 4
-rw-r--r--  vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/signal/signal_unsupported.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/sysinfo/numcpu.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go | 3
-rw-r--r--  vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/umask/umask_unix.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/umask/umask_unsupported.go | 1
-rw-r--r--  vendor/github.com/containers/common/pkg/util/util_supported.go | 11
-rw-r--r--  vendor/github.com/containers/common/pkg/util/util_windows.go | 1
-rw-r--r--  vendor/github.com/containers/ocicrypt/go.mod | 4
-rw-r--r--  vendor/github.com/containers/ocicrypt/go.sum | 8
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go | 46
-rw-r--r--  vendor/github.com/containers/storage/go.mod | 6
-rw-r--r--  vendor/github.com/containers/storage/go.sum | 12
-rw-r--r--  vendor/github.com/containers/storage/types/options.go | 26
-rw-r--r--  vendor/github.com/klauspost/compress/README.md | 17
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/autogen.go | 5
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/bitreader.go | 5
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress.go | 183
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s | 488
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in | 197
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_amd64.go | 181
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_amd64.s | 506
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in | 195
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_generic.go | 193
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/README.md | 72
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/blockdec.go | 18
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/decoder.go | 13
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/framedec.go | 13
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/fuzz.go | 4
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/fuzz_none.go | 4
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/seqdec.go | 20
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/zip.go | 18
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/zstd.go | 7
-rw-r--r--  vendor/github.com/miekg/pkcs11/.travis.yml | 14
-rw-r--r--  vendor/github.com/miekg/pkcs11/README.md | 10
-rw-r--r--  vendor/github.com/miekg/pkcs11/pkcs11.go | 11
-rw-r--r--  vendor/github.com/miekg/pkcs11/release.go | 3
-rw-r--r--  vendor/github.com/miekg/pkcs11/types.go | 12
-rw-r--r--  vendor/github.com/miekg/pkcs11/zconst.go (renamed from vendor/github.com/miekg/pkcs11/const.go) | 196
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go | 28
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go | 111
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go | 31
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_compare.go | 54
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go | 16
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go | 16
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_format.go | 12
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_forward.go | 24
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_order.go | 8
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertions.go | 112
-rw-r--r--  vendor/github.com/stretchr/testify/require/require.go | 30
-rw-r--r--  vendor/github.com/stretchr/testify/require/require_forward.go | 24
-rw-r--r--  vendor/modules.txt | 17
107 files changed, 2649 insertions, 556 deletions
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
index 184421370..895d6645a 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
@@ -3,7 +3,7 @@ module github.com/containerd/stargz-snapshotter/estargz
go 1.16
require (
- github.com/klauspost/compress v1.15.0
+ github.com/klauspost/compress v1.15.1
github.com/opencontainers/go-digest v1.0.0
github.com/vbatts/tar-split v0.11.2
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
index 0eb0b7a10..8b44342da 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
@@ -1,8 +1,8 @@
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/klauspost/compress v1.15.0 h1:xqfchp4whNFxn5A4XFyyYtitiWI8Hy5EW59jEwcyL6U=
-github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
+github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go
index 559a9a6a6..2191e3c4a 100644
--- a/vendor/github.com/containers/common/libimage/runtime.go
+++ b/vendor/github.com/containers/common/libimage/runtime.go
@@ -592,6 +592,8 @@ type RemoveImagesOptions struct {
// containers using a specific image. By default, all containers in
// the local containers storage will be removed (if Force is set).
RemoveContainerFunc RemoveContainerFunc
+ // Ignore if a specified image does not exist and do not throw an error.
+ Ignore bool
// IsExternalContainerFunc allows for checking whether the specified
// container is an external one (when containers=external filter is
// used). The definition of an external container can be set by
@@ -677,6 +679,9 @@ func (r *Runtime) RemoveImages(ctx context.Context, names []string, options *Rem
for _, name := range names {
img, resolvedName, err := r.LookupImage(name, lookupOptions)
if err != nil {
+ if options.Ignore && errors.Is(err, storage.ErrImageUnknown) {
+ continue
+ }
appendError(err)
continue
}
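
The Ignore field added above lets RemoveImages callers skip names that no longer resolve instead of collecting storage.ErrImageUnknown errors. A minimal sketch of such a caller, assuming a ready *libimage.Runtime and the usual reports-and-errors return pair; the helper name and behaviour shown here are illustrative, not part of this change (imports: context, github.com/containers/common/libimage):

    // removeIgnoringMissing is a hypothetical helper built on the new Ignore option.
    func removeIgnoringMissing(ctx context.Context, rt *libimage.Runtime, names []string) []error {
        opts := &libimage.RemoveImagesOptions{
            Force:  true, // also remove containers that use the images
            Ignore: true, // skip names that do not exist instead of reporting ErrImageUnknown
        }
        _, errs := rt.RemoveImages(ctx, names, opts)
        return errs
    }
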
diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go
index 5574b2b1c..8c4eeff9d 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/cni_conversion.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package cni
@@ -128,15 +129,21 @@ func findPluginByName(plugins []*libcni.NetworkConfig, name string) bool {
// It returns an array of subnets and an extra bool if dhcp is configured.
func convertIPAMConfToNetwork(network *types.Network, ipam *ipamConfig, confPath string) error {
if ipam.PluginType == types.DHCPIPAMDriver {
- network.IPAMOptions["driver"] = types.DHCPIPAMDriver
+ network.IPAMOptions[types.Driver] = types.DHCPIPAMDriver
return nil
}
if ipam.PluginType != types.HostLocalIPAMDriver {
- return errors.Errorf("unsupported ipam plugin %s in %s", ipam.PluginType, confPath)
+ // This is not an error. While we only support certain ipam drivers, we
+ // cannot make it fail for unsupported ones. CNI is still able to use them,
+ // just our translation logic cannot convert this into a Network.
+ // For the same reason this is not a warning; it would just be annoying for
+ // everyone using an unknown ipam driver.
+ logrus.Infof("unsupported ipam plugin %q in %s", ipam.PluginType, confPath)
+ return nil
}
- network.IPAMOptions["driver"] = types.HostLocalIPAMDriver
+ network.IPAMOptions[types.Driver] = types.HostLocalIPAMDriver
for _, r := range ipam.Ranges {
for _, ipam := range r {
s := types.Subnet{}
diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go b/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go
index c66e7ef5d..6bfa8d63b 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/cni_exec.go
@@ -16,6 +16,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build linux
// +build linux
package cni
diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_types.go b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go
index fbfcd49ad..9ee159886 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/cni_types.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package cni
diff --git a/vendor/github.com/containers/common/libnetwork/cni/config.go b/vendor/github.com/containers/common/libnetwork/cni/config.go
index e801e1469..8b300a03b 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/config.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/config.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package cni
@@ -197,12 +198,12 @@ func createIPMACVLAN(network *types.Network) error {
}
}
if len(network.Subnets) == 0 {
- network.IPAMOptions["driver"] = types.DHCPIPAMDriver
+ network.IPAMOptions[types.Driver] = types.DHCPIPAMDriver
if network.Internal {
return errors.New("internal is not supported with macvlan and dhcp ipam driver")
}
} else {
- network.IPAMOptions["driver"] = types.HostLocalIPAMDriver
+ network.IPAMOptions[types.Driver] = types.HostLocalIPAMDriver
}
return nil
}
diff --git a/vendor/github.com/containers/common/libnetwork/cni/network.go b/vendor/github.com/containers/common/libnetwork/cni/network.go
index 29866062e..82b9cbd2e 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/network.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/network.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package cni
diff --git a/vendor/github.com/containers/common/libnetwork/cni/run.go b/vendor/github.com/containers/common/libnetwork/cni/run.go
index af05d9d9d..8bea87893 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/run.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/run.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package cni
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go b/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go
index d81b78a6f..5a4752e2b 100644
--- a/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/bridge.go
@@ -27,7 +27,7 @@ func CreateBridge(n NetUtil, network *types.Network, usedNetworks []*net.IPNet,
}
}
- if network.IPAMOptions["driver"] != types.DHCPIPAMDriver {
+ if network.IPAMOptions[types.Driver] != types.DHCPIPAMDriver {
if len(network.Subnets) == 0 {
freeSubnet, err := GetFreeIPv4NetworkSubnet(usedNetworks, subnetPools)
if err != nil {
@@ -63,7 +63,7 @@ func CreateBridge(n NetUtil, network *types.Network, usedNetworks []*net.IPNet,
network.Subnets = append(network.Subnets, *freeSubnet)
}
}
- network.IPAMOptions["driver"] = types.HostLocalIPAMDriver
+ network.IPAMOptions[types.Driver] = types.HostLocalIPAMDriver
}
return nil
}
diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/validate.go b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go
index ac3934f8d..4dd44110a 100644
--- a/vendor/github.com/containers/common/libnetwork/internal/util/validate.go
+++ b/vendor/github.com/containers/common/libnetwork/internal/util/validate.go
@@ -109,7 +109,7 @@ func validatePerNetworkOpts(network *types.Network, netOpts *types.PerNetworkOpt
if netOpts.InterfaceName == "" {
return errors.Errorf("interface name on network %s is empty", network.Name)
}
- if network.IPAMOptions["driver"] == types.HostLocalIPAMDriver {
+ if network.IPAMOptions[types.Driver] == types.HostLocalIPAMDriver {
outer:
for _, ip := range netOpts.StaticIPs {
for _, s := range network.Subnets {
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/config.go b/vendor/github.com/containers/common/libnetwork/netavark/config.go
index d42062927..99b4e0308 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/config.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/config.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package netavark
@@ -130,6 +131,7 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
if err != nil {
return nil, err
}
+ defer f.Close()
enc := json.NewEncoder(f)
enc.SetIndent("", " ")
err = enc.Encode(newNetwork)
@@ -154,7 +156,7 @@ func createMacvlan(network *types.Network) error {
if len(network.Subnets) == 0 {
return errors.Errorf("macvlan driver needs at least one subnet specified, DHCP is not supported with netavark")
}
- network.IPAMOptions["driver"] = types.HostLocalIPAMDriver
+ network.IPAMOptions[types.Driver] = types.HostLocalIPAMDriver
// validate the given options, we do not need them but just check to make sure they are valid
for key, value := range network.Options {
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/const.go b/vendor/github.com/containers/common/libnetwork/netavark/const.go
index 9709315c6..29a7b4f2a 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/const.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/const.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package netavark
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/exec.go b/vendor/github.com/containers/common/libnetwork/netavark/exec.go
index 1812b9084..ac87c5438 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/exec.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/exec.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package netavark
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go
index f99d099ca..c0535515a 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package netavark
@@ -361,7 +362,7 @@ func (n *netavarkNetwork) deallocIPs(opts *types.NetworkOptions) error {
// it checks the ipam driver and if subnets are set
func requiresIPAMAlloc(network *types.Network) bool {
// only do host allocation when driver is set to HostLocalIPAMDriver or unset
- switch network.IPAMOptions["driver"] {
+ switch network.IPAMOptions[types.Driver] {
case "", types.HostLocalIPAMDriver:
default:
return false
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/network.go b/vendor/github.com/containers/common/libnetwork/netavark/network.go
index d20947cfd..166d5e31a 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/network.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/network.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package netavark
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/run.go b/vendor/github.com/containers/common/libnetwork/netavark/run.go
index 0a9dc3704..c5aa181fd 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/run.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/run.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package netavark
@@ -44,6 +45,16 @@ func (n *netavarkNetwork) Setup(namespacePath string, options types.SetupOptions
return nil, errors.Wrap(err, "failed to convert net opts")
}
+ // Warn users if one or more networks have dns enabled
+ // but aardvark-dns binary is not configured
+ for _, network := range netavarkOpts.Networks {
+ if network != nil && network.DNSEnabled && n.aardvarkBinary == "" {
+ // this is not a fatal error we can still use container without dns
+ logrus.Warnf("aardvark-dns binary not found, container dns will not be enabled")
+ break
+ }
+ }
+
// trace output to get the json
if logrus.IsLevelEnabled(logrus.TraceLevel) {
b, err := json.Marshal(&netavarkOpts)
diff --git a/vendor/github.com/containers/common/libnetwork/network/interface.go b/vendor/github.com/containers/common/libnetwork/network/interface.go
index 2c8c59432..9278d7773 100644
--- a/vendor/github.com/containers/common/libnetwork/network/interface.go
+++ b/vendor/github.com/containers/common/libnetwork/network/interface.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package network
@@ -61,11 +62,7 @@ func NetworkBackend(store storage.Store, conf *config.Config, syslog bool) (type
return "", nil, err
}
- aardvarkBin, err := conf.FindHelperBinary(aardvarkBinary, false)
- if err != nil {
- // this is not a fatal error we can still use netavark without dns
- logrus.Warnf("%s binary not found, container dns will not be enabled", aardvarkBin)
- }
+ aardvarkBin, _ := conf.FindHelperBinary(aardvarkBinary, false)
confDir := conf.Network.NetworkConfigDir
if confDir == "" {
diff --git a/vendor/github.com/containers/common/libnetwork/types/const.go b/vendor/github.com/containers/common/libnetwork/types/const.go
index b2d4a4538..5690a6058 100644
--- a/vendor/github.com/containers/common/libnetwork/types/const.go
+++ b/vendor/github.com/containers/common/libnetwork/types/const.go
@@ -11,6 +11,7 @@ const (
IPVLANNetworkDriver = "ipvlan"
// IPAM drivers
+ Driver = "driver"
// HostLocalIPAMDriver store the ip
HostLocalIPAMDriver = "host-local"
// DHCPIPAMDriver get subnet and ip from dhcp server
diff --git a/vendor/github.com/containers/common/libnetwork/util/filters.go b/vendor/github.com/containers/common/libnetwork/util/filters.go
index b27ca1f9a..58d79d25b 100644
--- a/vendor/github.com/containers/common/libnetwork/util/filters.go
+++ b/vendor/github.com/containers/common/libnetwork/util/filters.go
@@ -29,7 +29,7 @@ func createFilterFuncs(key string, filterValues []string) (types.FilterFunc, err
return util.StringMatchRegexSlice(net.Name, filterValues)
}, nil
- case "driver":
+ case types.Driver:
// matches network driver
return func(net types.Network) bool {
return util.StringInSlice(net.Driver, filterValues)
diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go
index 735d19493..c864a189e 100644
--- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go
+++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go
@@ -1,3 +1,4 @@
+//go:build linux && apparmor
// +build linux,apparmor
package apparmor
diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go
index 021e32571..667fa9f26 100644
--- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go
+++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go
@@ -1,3 +1,4 @@
+//go:build linux && apparmor
// +build linux,apparmor
package apparmor
diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go
index 13469f1b6..dacfc2f48 100644
--- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go
+++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !linux || !apparmor
// +build !linux !apparmor
package apparmor
diff --git a/vendor/github.com/containers/common/pkg/auth/auth.go b/vendor/github.com/containers/common/pkg/auth/auth.go
index af3c8f803..6765c9e5b 100644
--- a/vendor/github.com/containers/common/pkg/auth/auth.go
+++ b/vendor/github.com/containers/common/pkg/auth/auth.go
@@ -4,6 +4,7 @@ import (
"bufio"
"context"
"fmt"
+ "net/url"
"os"
"path/filepath"
"strings"
@@ -165,20 +166,21 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
// parseCredentialsKey turns the provided argument into a valid credential key
// and computes the registry part.
func parseCredentialsKey(arg string, acceptRepositories bool) (key, registry string, err error) {
- if !acceptRepositories {
- registry = getRegistryName(arg)
- key = registry
- return key, registry, nil
+ // URL arguments are replaced with their host[:port] parts.
+ key, err = replaceURLByHostPort(arg)
+ if err != nil {
+ return "", "", err
}
- key = trimScheme(arg)
- if key != arg {
- return "", "", errors.New("credentials key has https[s]:// prefix")
+ split := strings.Split(key, "/")
+ registry = split[0]
+
+ if !acceptRepositories {
+ return registry, registry, nil
}
- registry = getRegistryName(key)
+ // Return early if the key isn't namespaced or uses an http{s} prefix.
if registry == key {
- // The key is not namespaced
return key, registry, nil
}
@@ -202,24 +204,18 @@ func parseCredentialsKey(arg string, acceptRepositories bool) (key, registry str
return key, registry, nil
}
-// getRegistryName scrubs and parses the input to get the server name
-func getRegistryName(server string) string {
- // removes 'http://' or 'https://' from the front of the
- // server/registry string if either is there. This will be mostly used
- // for user input from 'Buildah login' and 'Buildah logout'.
- server = trimScheme(server)
- // gets the registry from the input. If the input is of the form
- // quay.io/myuser/myimage, it will parse it and just return quay.io
- split := strings.Split(server, "/")
- return split[0]
-}
-
-// trimScheme removes the HTTP(s) scheme from the provided repository.
-func trimScheme(repository string) string {
- // removes 'http://' or 'https://' from the front of the
- // server/registry string if either is there. This will be mostly used
- // for user input from 'Buildah login' and 'Buildah logout'.
- return strings.TrimPrefix(strings.TrimPrefix(repository, "https://"), "http://")
+// If the specified string starts with http{s} it is replaced with its
+// host[:port] part; everything else is stripped. Otherwise, the string is
+// returned as is.
+func replaceURLByHostPort(repository string) (string, error) {
+ if !strings.HasPrefix(repository, "https://") && !strings.HasPrefix(repository, "http://") {
+ return repository, nil
+ }
+ u, err := url.Parse(repository)
+ if err != nil {
+ return "", fmt.Errorf("trimming http{s} prefix: %v", err)
+ }
+ return u.Host, nil
}
// getUserAndPass gets the username and password from STDIN if not given
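
The rewritten parseCredentialsKey now funnels every argument through replaceURLByHostPort before splitting on "/". A self-contained sketch of that host[:port] reduction using only the standard library; the function name hostPortKey and the sample inputs are illustrative:

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    // hostPortKey mirrors the helper above: arguments carrying an http(s) scheme
    // are reduced to their host[:port] part, everything else passes through unchanged.
    func hostPortKey(arg string) (string, error) {
        if !strings.HasPrefix(arg, "https://") && !strings.HasPrefix(arg, "http://") {
            return arg, nil
        }
        u, err := url.Parse(arg)
        if err != nil {
            return "", fmt.Errorf("trimming http{s} prefix: %v", err)
        }
        return u.Host, nil
    }

    func main() {
        for _, arg := range []string{"https://quay.io:8443/user/image", "quay.io/user/image"} {
            key, _ := hostPortKey(arg)
            fmt.Printf("%s -> %s\n", arg, key)
        }
        // Output:
        // https://quay.io:8443/user/image -> quay.io:8443
        // quay.io/user/image -> quay.io/user/image
    }
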
diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go
index c1fe194b2..edb28ad18 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_supported.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package cgroups
diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go
index 95d424170..b3dcb2d33 100644
--- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go
+++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !linux
// +build !linux
package cgroups
diff --git a/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go b/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go
index 61b3653e5..f61bd3bb2 100644
--- a/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go
+++ b/vendor/github.com/containers/common/pkg/cgroupv2/cgroups_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !linux
// +build !linux
package cgroupv2
diff --git a/vendor/github.com/containers/common/pkg/chown/chown_unix.go b/vendor/github.com/containers/common/pkg/chown/chown_unix.go
index 921927de4..ea8f5963e 100644
--- a/vendor/github.com/containers/common/pkg/chown/chown_unix.go
+++ b/vendor/github.com/containers/common/pkg/chown/chown_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package chown
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index a1d6f259a..8bf62800f 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -249,6 +249,10 @@ type EngineConfig struct {
// EventsLogFilePath is where the events log is stored.
EventsLogFilePath string `toml:"events_logfile_path,omitempty"`
+ // EventsLogFileMaxSize sets the maximum size for the events log. When the limit is exceeded,
+ // the logfile is rotated and the old one is deleted.
+ EventsLogFileMaxSize uint64 `toml:"events_logfile_max_size,omitempty"`
+
// EventsLogger determines where events should be logged.
EventsLogger string `toml:"events_logger,omitempty"`
diff --git a/vendor/github.com/containers/common/pkg/config/config_local.go b/vendor/github.com/containers/common/pkg/config/config_local.go
index 21dab043f..bfb967582 100644
--- a/vendor/github.com/containers/common/pkg/config/config_local.go
+++ b/vendor/github.com/containers/common/pkg/config/config_local.go
@@ -1,3 +1,4 @@
+//go:build !remote
// +build !remote
package config
diff --git a/vendor/github.com/containers/common/pkg/config/config_remote.go b/vendor/github.com/containers/common/pkg/config/config_remote.go
index 7fd9202bb..bff869efa 100644
--- a/vendor/github.com/containers/common/pkg/config/config_remote.go
+++ b/vendor/github.com/containers/common/pkg/config/config_remote.go
@@ -1,3 +1,4 @@
+//go:build remote
// +build remote
package config
diff --git a/vendor/github.com/containers/common/pkg/config/config_unsupported.go b/vendor/github.com/containers/common/pkg/config/config_unsupported.go
index 6563fd317..64e4fcfcd 100644
--- a/vendor/github.com/containers/common/pkg/config/config_unsupported.go
+++ b/vendor/github.com/containers/common/pkg/config/config_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !linux
// +build !linux
package config
diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf
index 03de59943..1db2d704a 100644
--- a/vendor/github.com/containers/common/pkg/config/containers.conf
+++ b/vendor/github.com/containers/common/pkg/config/containers.conf
@@ -371,6 +371,12 @@ default_sysctls = [
# Define where event logs will be stored, when events_logger is "file".
#events_logfile_path=""
+# Sets the maximum size for events_logfile_path in bytes. When the limit is exceeded,
+# the logfile will be rotated and the old one will be deleted.
+# If the maximum size is set to 0, then no limit will be applied,
+# and the logfile will not be rotated.
+#events_logfile_max_size = 0
+
# Selects which logging mechanism to use for container engine events.
# Valid values are `journald`, `file` and `none`.
#
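
A hedged containers.conf fragment showing how the new option could be combined with the file logger; the path and the 1000000-byte limit are illustrative values, not defaults introduced by this change:

    # Illustrative [engine] settings: log events to a file and rotate it
    # once it grows past roughly 1 MB.
    [engine]
    events_logger = "file"
    events_logfile_path = "/run/podman/events.log"
    events_logfile_max_size = 1000000
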
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go
index e4344e8be..3255cff9d 100644
--- a/vendor/github.com/containers/common/pkg/config/default.go
+++ b/vendor/github.com/containers/common/pkg/config/default.go
@@ -276,7 +276,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
storeOpts.GraphRoot = _defaultGraphRoot
}
c.graphRoot = storeOpts.GraphRoot
- c.ImageCopyTmpDir = "/var/tmp"
+ c.ImageCopyTmpDir = getDefaultTmpDir()
c.StaticDir = filepath.Join(storeOpts.GraphRoot, "libpod")
c.VolumePath = filepath.Join(storeOpts.GraphRoot, "volumes")
diff --git a/vendor/github.com/containers/common/pkg/config/default_linux.go b/vendor/github.com/containers/common/pkg/config/default_linux.go
index cc2d0fe3e..d6ea4359c 100644
--- a/vendor/github.com/containers/common/pkg/config/default_linux.go
+++ b/vendor/github.com/containers/common/pkg/config/default_linux.go
@@ -3,6 +3,7 @@ package config
import (
"fmt"
"io/ioutil"
+ "os"
"strconv"
"strings"
@@ -48,3 +49,12 @@ func getDefaultProcessLimits() []string {
}
return defaultLimits
}
+
+// getDefaultTmpDir for linux
+func getDefaultTmpDir() string {
+ // first check the TMPDIR env var
+ if path, found := os.LookupEnv("TMPDIR"); found {
+ return path
+ }
+ return "/var/tmp"
+}
diff --git a/vendor/github.com/containers/common/pkg/config/default_unsupported.go b/vendor/github.com/containers/common/pkg/config/default_unsupported.go
index 1aa7f6ef3..4be826755 100644
--- a/vendor/github.com/containers/common/pkg/config/default_unsupported.go
+++ b/vendor/github.com/containers/common/pkg/config/default_unsupported.go
@@ -1,7 +1,10 @@
+//go:build !linux && !windows
// +build !linux,!windows
package config
+import "os"
+
// getDefaultMachineImage returns the default machine image stream
// On Linux/Mac, this returns the FCOS stream
func getDefaultMachineImage() string {
@@ -22,3 +25,12 @@ func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) {
func getDefaultProcessLimits() []string {
return []string{}
}
+
+// getDefaultTmpDir for linux
+func getDefaultTmpDir() string {
+ // first check the TMPDIR env var
+ if path, found := os.LookupEnv("TMPDIR"); found {
+ return path
+ }
+ return "/var/tmp"
+}
diff --git a/vendor/github.com/containers/common/pkg/config/default_windows.go b/vendor/github.com/containers/common/pkg/config/default_windows.go
index 28f102f1c..db230dfb2 100644
--- a/vendor/github.com/containers/common/pkg/config/default_windows.go
+++ b/vendor/github.com/containers/common/pkg/config/default_windows.go
@@ -1,5 +1,7 @@
package config
+import "os"
+
// getDefaultImage returns the default machine image stream
// On Windows this refers to the Fedora major release number
func getDefaultMachineImage() string {
@@ -20,3 +22,13 @@ func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) {
func getDefaultProcessLimits() []string {
return []string{}
}
+
+// getDefaultTmpDir for windows
+func getDefaultTmpDir() string {
+ // first check the Temp env var
+ // https://answers.microsoft.com/en-us/windows/forum/all/where-is-the-temporary-folder/44a039a5-45ba-48dd-84db-fd700e54fd56
+ if val, ok := os.LookupEnv("TEMP"); ok {
+ return val
+ }
+ return os.Getenv("LOCALAPPDATA") + "\\Temp"
+}
diff --git a/vendor/github.com/containers/common/pkg/config/nosystemd.go b/vendor/github.com/containers/common/pkg/config/nosystemd.go
index f64b2dfc6..352fddf92 100644
--- a/vendor/github.com/containers/common/pkg/config/nosystemd.go
+++ b/vendor/github.com/containers/common/pkg/config/nosystemd.go
@@ -1,3 +1,4 @@
+//go:build !systemd || !cgo
// +build !systemd !cgo
package config
diff --git a/vendor/github.com/containers/common/pkg/config/systemd.go b/vendor/github.com/containers/common/pkg/config/systemd.go
index 186e8b343..f17a84304 100644
--- a/vendor/github.com/containers/common/pkg/config/systemd.go
+++ b/vendor/github.com/containers/common/pkg/config/systemd.go
@@ -1,3 +1,4 @@
+//go:build systemd && cgo
// +build systemd,cgo
package config
diff --git a/vendor/github.com/containers/common/pkg/parse/parse_unix.go b/vendor/github.com/containers/common/pkg/parse/parse_unix.go
index ce4446a1b..d087c4a02 100644
--- a/vendor/github.com/containers/common/pkg/parse/parse_unix.go
+++ b/vendor/github.com/containers/common/pkg/parse/parse_unix.go
@@ -1,3 +1,4 @@
+//go:build linux || darwin
// +build linux darwin
package parse
diff --git a/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go b/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go
index 676980975..901e28a5d 100644
--- a/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go
+++ b/vendor/github.com/containers/common/pkg/retry/retry_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !linux
// +build !linux
package retry
diff --git a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go
index d196384f0..fbf10ca31 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/default_linux.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/default_linux.go
@@ -236,6 +236,7 @@ func DefaultProfile() *Seccomp {
"lstat64",
"madvise",
"mbind",
+ "membarrier",
"memfd_create",
"memfd_secret",
"mincore",
@@ -249,6 +250,7 @@ func DefaultProfile() *Seccomp {
"mmap",
"mmap2",
"mount",
+ "mount_setattr",
"move_mount",
"mprotect",
"mq_getsetattr",
@@ -293,6 +295,7 @@ func DefaultProfile() *Seccomp {
"preadv",
"preadv2",
"prlimit64",
+ "process_mrelease",
"pselect6",
"pselect6_time64",
"pwrite64",
@@ -388,10 +391,15 @@ func DefaultProfile() *Seccomp {
"shmdt",
"shmget",
"shutdown",
+ "sigaction",
"sigaltstack",
+ "signal",
"signalfd",
"signalfd4",
+ "sigpending",
+ "sigprocmask",
"sigreturn",
+ "sigsuspend",
"socketcall",
"socketpair",
"splice",
@@ -405,6 +413,7 @@ func DefaultProfile() *Seccomp {
"sync",
"sync_file_range",
"syncfs",
+ "syscall",
"sysinfo",
"syslog",
"tee",
@@ -417,6 +426,7 @@ func DefaultProfile() *Seccomp {
"timer_gettime64",
"timer_settime",
"timer_settime64",
+ "timerfd",
"timerfd_create",
"timerfd_gettime",
"timerfd_gettime64",
diff --git a/vendor/github.com/containers/common/pkg/seccomp/errno_list.go b/vendor/github.com/containers/common/pkg/seccomp/errno_list.go
index a1009012d..87ac2ab77 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/errno_list.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/errno_list.go
@@ -1,3 +1,4 @@
+//go:build linux && seccomp
// +build linux,seccomp
package seccomp
diff --git a/vendor/github.com/containers/common/pkg/seccomp/filter.go b/vendor/github.com/containers/common/pkg/seccomp/filter.go
index 90da99f0a..5c278574c 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/filter.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/filter.go
@@ -1,3 +1,4 @@
+//go:build seccomp
// +build seccomp
// NOTE: this package has originally been copied from
diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json
index 9314eb3cc..793f9bdac 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/seccomp.json
+++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp.json
@@ -243,6 +243,7 @@
"lstat64",
"madvise",
"mbind",
+ "membarrier",
"memfd_create",
"memfd_secret",
"mincore",
@@ -256,6 +257,7 @@
"mmap",
"mmap2",
"mount",
+ "mount_setattr",
"move_mount",
"mprotect",
"mq_getsetattr",
@@ -300,6 +302,7 @@
"preadv",
"preadv2",
"prlimit64",
+ "process_mrelease",
"pselect6",
"pselect6_time64",
"pwrite64",
@@ -395,10 +398,15 @@
"shmdt",
"shmget",
"shutdown",
+ "sigaction",
"sigaltstack",
+ "signal",
"signalfd",
"signalfd4",
+ "sigpending",
+ "sigprocmask",
"sigreturn",
+ "sigsuspend",
"socketcall",
"socketpair",
"splice",
@@ -412,6 +420,7 @@
"sync",
"sync_file_range",
"syncfs",
+ "syscall",
"sysinfo",
"syslog",
"tee",
@@ -424,6 +433,7 @@
"timer_gettime64",
"timer_settime",
"timer_settime64",
+ "timerfd",
"timerfd_create",
"timerfd_gettime",
"timerfd_gettime64",
diff --git a/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go b/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go
index 8b23ee2c0..da5230c56 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/seccomp_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !linux || !seccomp
// +build !linux !seccomp
// SPDX-License-Identifier: Apache-2.0
diff --git a/vendor/github.com/containers/common/pkg/seccomp/supported.go b/vendor/github.com/containers/common/pkg/seccomp/supported.go
index 86e1b66bb..f8a20e536 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/supported.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/supported.go
@@ -1,3 +1,4 @@
+//go:build linux && seccomp
// +build linux,seccomp
package seccomp
diff --git a/vendor/github.com/containers/common/pkg/seccomp/validate.go b/vendor/github.com/containers/common/pkg/seccomp/validate.go
index 1c5c4edc6..669ab04a2 100644
--- a/vendor/github.com/containers/common/pkg/seccomp/validate.go
+++ b/vendor/github.com/containers/common/pkg/seccomp/validate.go
@@ -1,3 +1,4 @@
+//go:build seccomp
// +build seccomp
package seccomp
diff --git a/vendor/github.com/containers/common/pkg/signal/signal_linux.go b/vendor/github.com/containers/common/pkg/signal/signal_linux.go
index 305b9d21f..21e09c9fe 100644
--- a/vendor/github.com/containers/common/pkg/signal/signal_linux.go
+++ b/vendor/github.com/containers/common/pkg/signal/signal_linux.go
@@ -1,5 +1,5 @@
-// +build linux
-// +build !mips,!mipsle,!mips64,!mips64le
+//go:build linux && !mips && !mipsle && !mips64 && !mips64le
+// +build linux,!mips,!mipsle,!mips64,!mips64le
// Signal handling for Linux only.
package signal
diff --git a/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go b/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go
index 45c9d5af1..52b07aaf4 100644
--- a/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go
+++ b/vendor/github.com/containers/common/pkg/signal/signal_linux_mipsx.go
@@ -1,3 +1,4 @@
+//go:build linux && (mips || mipsle || mips64 || mips64le)
// +build linux
// +build mips mipsle mips64 mips64le
diff --git a/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go b/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go
index 9d1733c02..0e8685a7c 100644
--- a/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go
+++ b/vendor/github.com/containers/common/pkg/signal/signal_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !linux
// +build !linux
// Signal handling for Linux only.
diff --git a/vendor/github.com/containers/common/pkg/sysinfo/numcpu.go b/vendor/github.com/containers/common/pkg/sysinfo/numcpu.go
index aeb1a3a80..d9d8cfb3e 100644
--- a/vendor/github.com/containers/common/pkg/sysinfo/numcpu.go
+++ b/vendor/github.com/containers/common/pkg/sysinfo/numcpu.go
@@ -1,3 +1,4 @@
+//go:build !linux && !windows
// +build !linux,!windows
package sysinfo
diff --git a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go
index 2b664c7f8..0adf58358 100644
--- a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go
+++ b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_linux.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package sysinfo
diff --git a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go
index 1d89dd550..94160ad57 100644
--- a/vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go
+++ b/vendor/github.com/containers/common/pkg/sysinfo/numcpu_windows.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package sysinfo
diff --git a/vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go b/vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go
index 1fc4e6d19..859791e36 100644
--- a/vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go
+++ b/vendor/github.com/containers/common/pkg/sysinfo/nummem_linux.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package sysinfo
diff --git a/vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go b/vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go
index e3c851fe6..c9e4184aa 100644
--- a/vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go
+++ b/vendor/github.com/containers/common/pkg/sysinfo/nummem_unsupported.go
@@ -1,4 +1,5 @@
-// +build windows, osx
+//go:build (windows && ignore) || osx
+// +build windows,ignore osx
package sysinfo
diff --git a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go
index 7463cdd8f..801db8c80 100644
--- a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go
+++ b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_solaris.go
@@ -1,3 +1,4 @@
+//go:build solaris && cgo
// +build solaris,cgo
package sysinfo
diff --git a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go
index 45f3ef1c6..4aa9401f6 100644
--- a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go
+++ b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_unix.go
@@ -1,3 +1,4 @@
+//go:build !linux && !solaris && !windows
// +build !linux,!solaris,!windows
package sysinfo
diff --git a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go
index 4e6255bc5..455a8892f 100644
--- a/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go
+++ b/vendor/github.com/containers/common/pkg/sysinfo/sysinfo_windows.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package sysinfo
diff --git a/vendor/github.com/containers/common/pkg/umask/umask_unix.go b/vendor/github.com/containers/common/pkg/umask/umask_unix.go
index bb589f7ac..e59d7bea7 100644
--- a/vendor/github.com/containers/common/pkg/umask/umask_unix.go
+++ b/vendor/github.com/containers/common/pkg/umask/umask_unix.go
@@ -1,3 +1,4 @@
+//go:build linux || darwin
// +build linux darwin
package umask
diff --git a/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go b/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go
index 9041d5f20..cf76ea1d3 100644
--- a/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go
+++ b/vendor/github.com/containers/common/pkg/umask/umask_unsupported.go
@@ -1,3 +1,4 @@
+//go:build !linux && !darwin
// +build !linux,!darwin
package umask
diff --git a/vendor/github.com/containers/common/pkg/util/util_supported.go b/vendor/github.com/containers/common/pkg/util/util_supported.go
index 422e28742..284f3ffdd 100644
--- a/vendor/github.com/containers/common/pkg/util/util_supported.go
+++ b/vendor/github.com/containers/common/pkg/util/util_supported.go
@@ -1,3 +1,4 @@
+//go:build linux || darwin
// +build linux darwin
package util
@@ -19,6 +20,12 @@ var (
rootlessRuntimeDir string
)
+// isWriteableOnlyByOwner checks that the specified permission mask allows write
+// access only to the owner.
+func isWriteableOnlyByOwner(perm os.FileMode) bool {
+ return (perm & 0722) == 0700
+}
+
// GetRuntimeDir returns the runtime directory
func GetRuntimeDir() (string, error) {
var rootlessRuntimeDirError error
@@ -43,7 +50,7 @@ func GetRuntimeDir() (string, error) {
logrus.Debugf("unable to make temp dir: %v", err)
}
st, err := os.Stat(tmpDir)
- if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 {
+ if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) {
runtimeDir = tmpDir
}
}
@@ -53,7 +60,7 @@ func GetRuntimeDir() (string, error) {
logrus.Debugf("unable to make temp dir %v", err)
}
st, err := os.Stat(tmpDir)
- if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && st.Mode().Perm() == 0700 {
+ if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) {
runtimeDir = tmpDir
}
}
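
A small self-contained check of the mask logic introduced above; the sample modes are illustrative:

    package main

    import (
        "fmt"
        "os"
    )

    // Same test as isWriteableOnlyByOwner above: the owner bits must be rwx and
    // the group/other write bits must be clear, i.e. (perm & 0722) == 0700.
    func isWriteableOnlyByOwner(perm os.FileMode) bool {
        return (perm & 0722) == 0700
    }

    func main() {
        for _, perm := range []os.FileMode{0700, 0750, 0755, 0770, 0600} {
            fmt.Printf("%#o -> %v\n", uint32(perm), isWriteableOnlyByOwner(perm))
        }
        // 0700 -> true    0750 -> true    0755 -> true
        // 0770 -> false (group write bit set)    0600 -> false (owner lacks execute)
    }
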
diff --git a/vendor/github.com/containers/common/pkg/util/util_windows.go b/vendor/github.com/containers/common/pkg/util/util_windows.go
index 2add712f1..1cffb21fc 100644
--- a/vendor/github.com/containers/common/pkg/util/util_windows.go
+++ b/vendor/github.com/containers/common/pkg/util/util_windows.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package util
diff --git a/vendor/github.com/containers/ocicrypt/go.mod b/vendor/github.com/containers/ocicrypt/go.mod
index 02be18591..8837d288e 100644
--- a/vendor/github.com/containers/ocicrypt/go.mod
+++ b/vendor/github.com/containers/ocicrypt/go.mod
@@ -5,9 +5,9 @@ go 1.12
require (
github.com/golang/protobuf v1.4.3
github.com/google/go-cmp v0.5.2 // indirect
- github.com/miekg/pkcs11 v1.0.3
+ github.com/miekg/pkcs11 v1.1.1
github.com/opencontainers/go-digest v1.0.0
- github.com/opencontainers/image-spec v1.0.1
+ github.com/opencontainers/image-spec v1.0.2
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.7.0
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980
diff --git a/vendor/github.com/containers/ocicrypt/go.sum b/vendor/github.com/containers/ocicrypt/go.sum
index 7153900da..a621a145c 100644
--- a/vendor/github.com/containers/ocicrypt/go.sum
+++ b/vendor/github.com/containers/ocicrypt/go.sum
@@ -30,12 +30,12 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw=
-github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
-github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
+github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 00a3b5e4d..a780ef5da 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -292,6 +292,31 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
backingFs = fsName
}
+ runhome := filepath.Join(options.RunRoot, filepath.Base(home))
+ rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create the driver home dir
+ if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil {
+ return nil, err
+ }
+
+ if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil {
+ return nil, err
+ }
+
+ if opts.mountProgram == "" {
+ if supported, err := SupportsNativeOverlay(home, runhome); err != nil {
+ return nil, err
+ } else if !supported {
+ if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
+ opts.mountProgram = path
+ }
+ }
+ }
+
if opts.mountProgram != "" {
if unshare.IsRootless() && isNetworkFileSystem(fsMagic) && opts.forceMask == nil {
m := os.FileMode(0700)
@@ -316,20 +341,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
}
}
- rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
- if err != nil {
- return nil, err
- }
-
- // Create the driver home dir
- if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil {
- return nil, err
- }
- runhome := filepath.Join(options.RunRoot, filepath.Base(home))
- if err := idtools.MkdirAllAs(runhome, 0700, rootUID, rootGID); err != nil {
- return nil, err
- }
-
var usingMetacopy bool
var supportsDType bool
var supportsVolatile *bool
@@ -569,14 +580,11 @@ func cachedFeatureRecord(runhome, feature string, supported bool, text string) (
return err
}
-func SupportsNativeOverlay(graphroot, rundir string) (bool, error) {
- if os.Geteuid() != 0 || graphroot == "" || rundir == "" {
+func SupportsNativeOverlay(home, runhome string) (bool, error) {
+ if os.Geteuid() != 0 || home == "" || runhome == "" {
return false, nil
}
- home := filepath.Join(graphroot, "overlay")
- runhome := filepath.Join(rundir, "overlay")
-
var contents string
flagContent, err := ioutil.ReadFile(getMountProgramFlagFile(home))
if err == nil {
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index 3e8d51f6a..4da8384af 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -6,13 +6,13 @@ require (
github.com/BurntSushi/toml v1.0.0
github.com/Microsoft/go-winio v0.5.2
github.com/Microsoft/hcsshim v0.9.2
- github.com/containerd/stargz-snapshotter/estargz v0.11.2
+ github.com/containerd/stargz-snapshotter/estargz v0.11.3
github.com/cyphar/filepath-securejoin v0.2.3
github.com/docker/go-units v0.4.0
github.com/google/go-intervals v0.0.2
github.com/hashicorp/go-multierror v1.1.1
github.com/json-iterator/go v1.1.12
- github.com/klauspost/compress v1.15.0
+ github.com/klauspost/compress v1.15.1
github.com/klauspost/pgzip v1.2.5
github.com/mattn/go-shellwords v1.0.12
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
@@ -23,7 +23,7 @@ require (
github.com/opencontainers/selinux v1.10.0
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.8.1
- github.com/stretchr/testify v1.7.0
+ github.com/stretchr/testify v1.7.1
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
github.com/tchap/go-patricia v2.3.0+incompatible
github.com/ulikunitz/xz v0.5.10
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index ef6b711cf..b995da734 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -176,8 +176,8 @@ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFY
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
-github.com/containerd/stargz-snapshotter/estargz v0.11.2 h1:0P0vWmfrEeTtZ4BBRrpuyu/HxR9HPBLfeljGOra5f6g=
-github.com/containerd/stargz-snapshotter/estargz v0.11.2/go.mod h1:rjbdAXaytDSIrAy2WAy2kUrJ4ehzDS0eUQLlIb5UCY0=
+github.com/containerd/stargz-snapshotter/estargz v0.11.3 h1:k2kN16Px6LYuv++qFqK+JTcYqc8bEVxzGpf8/gFBL5M=
+github.com/containerd/stargz-snapshotter/estargz v0.11.3/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -424,8 +424,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.15.0 h1:xqfchp4whNFxn5A4XFyyYtitiWI8Hy5EW59jEwcyL6U=
-github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
+github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -622,8 +622,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go
index 567985b98..a71c6d2ef 100644
--- a/vendor/github.com/containers/storage/types/options.go
+++ b/vendor/github.com/containers/storage/types/options.go
@@ -3,14 +3,12 @@ package types
import (
"fmt"
"os"
- "os/exec"
"path/filepath"
"strings"
"sync"
"time"
"github.com/BurntSushi/toml"
- "github.com/containers/storage/drivers/overlay"
cfg "github.com/containers/storage/pkg/config"
"github.com/containers/storage/pkg/idtools"
"github.com/sirupsen/logrus"
@@ -225,25 +223,11 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti
opts.GraphDriverName = overlayDriver
}
- if opts.GraphDriverName == "" || opts.GraphDriverName == overlayDriver {
- supported, err := overlay.SupportsNativeOverlay(opts.GraphRoot, rootlessRuntime)
- if err != nil {
- return opts, err
- }
- if supported {
- opts.GraphDriverName = overlayDriver
- } else {
- if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
- opts.GraphDriverName = overlayDriver
- opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)}
- }
- }
- if opts.GraphDriverName == overlayDriver {
- for _, o := range systemOpts.GraphDriverOptions {
- if strings.Contains(o, "ignore_chown_errors") {
- opts.GraphDriverOptions = append(opts.GraphDriverOptions, o)
- break
- }
+ if opts.GraphDriverName == overlayDriver {
+ for _, o := range systemOpts.GraphDriverOptions {
+ if strings.Contains(o, "ignore_chown_errors") {
+ opts.GraphDriverOptions = append(opts.GraphDriverOptions, o)
+ break
}
}
}
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 9ddf39f6f..0e2dc116a 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -17,6 +17,23 @@ This package provides various compression algorithms.
# changelog
+* Mar 3, 2022 (v1.15.0)
+ * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
+ * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+	* huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
+ * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
+ * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
+ * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+
+<details>
+ <summary>See Details</summary>
+Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
+
+Asynchronous stream decompression is now faster, since goroutine allocation splits the workload much more effectively. Typical streams will use 2 cores fully for decompression. When a stream has finished decoding, no goroutines are left over, so decoders can safely be pooled and still be garbage collected.
+
+While the release has been extensively tested, it is recommended to test when upgrading.
+</details>
+
* Feb 22, 2022 (v1.14.4)
* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
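As a usage note on the v1.15.0 changelog entry above: with "concurrency" set to 1, both the encoder and the decoder run their stream loops on the caller's goroutine. The sketch below is a minimal, assumed example; WithEncoderConcurrency and WithDecoderConcurrency are the package's existing options, while the payload and program structure are illustrative only.

```go
package main

import (
	"bytes"
	"io"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var compressed bytes.Buffer

	// Encoder with a single worker: stream encoding happens synchronously,
	// without spawning goroutines.
	enc, err := zstd.NewWriter(&compressed, zstd.WithEncoderConcurrency(1))
	if err != nil {
		log.Fatal(err)
	}
	if _, err := enc.Write([]byte("example payload")); err != nil {
		log.Fatal(err)
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}

	// Decoder with a single worker: leaves no goroutines behind once the
	// stream is drained, so pooled decoders can still be garbage collected.
	dec, err := zstd.NewReader(&compressed, zstd.WithDecoderConcurrency(1))
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	out, err := io.ReadAll(dec)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("decompressed %d bytes", len(out))
}
```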
diff --git a/vendor/github.com/klauspost/compress/huff0/autogen.go b/vendor/github.com/klauspost/compress/huff0/autogen.go
new file mode 100644
index 000000000..ff2c69d60
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/autogen.go
@@ -0,0 +1,5 @@
+package huff0
+
+//go:generate go run generate.go
+//go:generate asmfmt -w decompress_amd64.s
+//go:generate asmfmt -w decompress_8b_amd64.s
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
index 03562db16..451160edd 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitreader.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -165,6 +165,11 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
return uint16(b.value >> ((64 - n) & 63))
}
+// peekTopBits(n) is equivalent to peekBitsFast(64 - n)
+func (b *bitReaderShifted) peekTopBits(n uint8) uint16 {
+ return uint16(b.value >> n)
+}
+
func (b *bitReaderShifted) advance(n uint8) {
b.bitsRead += n
b.value <<= n & 63
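The identity in the new peekTopBits comment can be checked with a small standalone sketch; the two helpers below are hypothetical stand-ins for the unexported bitReaderShifted methods and only mirror their shift arithmetic on a left-justified 64-bit buffer.

```go
package main

import "fmt"

// peekBitsFast mirrors bitReaderShifted.peekBitsFast: reads the top n bits of value.
func peekBitsFast(value uint64, n uint8) uint16 {
	return uint16(value >> ((64 - n) & 63))
}

// peekTopBits mirrors the new helper: shifts right by a precomputed amount.
func peekTopBits(value uint64, n uint8) uint16 {
	return uint16(value >> n)
}

func main() {
	value := uint64(0xABCDEF0123456789)
	n := uint8(11) // e.g. a table log of 11

	// peekTopBits(64-n) and peekBitsFast(n) read the same top n bits.
	fmt.Println(peekTopBits(value, 64-n) == peekBitsFast(value, n)) // true
}
```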
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 3ae7d4677..04f652995 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -729,189 +729,6 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
// the uncompressed data exactly.
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
- if len(d.dt.single) == 0 {
- return nil, errors.New("no table loaded")
- }
- if len(src) < 6+(4*1) {
- return nil, errors.New("input too small")
- }
- if use8BitTables && d.actualTableLog <= 8 {
- return d.decompress4X8bit(dst, src)
- }
-
- var br [4]bitReaderShifted
- // Decode "jump table"
- start := 6
- for i := 0; i < 3; i++ {
- length := int(src[i*2]) | (int(src[i*2+1]) << 8)
- if start+length >= len(src) {
- return nil, errors.New("truncated input (or invalid offset)")
- }
- err := br[i].init(src[start : start+length])
- if err != nil {
- return nil, err
- }
- start += length
- }
- err := br[3].init(src[start:])
- if err != nil {
- return nil, err
- }
-
- // destination, offset to match first output
- dstSize := cap(dst)
- dst = dst[:dstSize]
- out := dst
- dstEvery := (dstSize + 3) / 4
-
- const tlSize = 1 << tableLogMax
- const tlMask = tlSize - 1
- single := d.dt.single[:tlSize]
-
- // Use temp table to avoid bound checks/append penalty.
- buf := d.buffer()
- var off uint8
- var decoded int
-
- // Decode 2 values from each decoder/loop.
- const bufoff = 256
- for {
- if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
- break
- }
-
- {
- const stream = 0
- const stream2 = 1
- br[stream].fillFast()
- br[stream2].fillFast()
-
- val := br[stream].peekBitsFast(d.actualTableLog)
- val2 := br[stream2].peekBitsFast(d.actualTableLog)
- v := single[val&tlMask]
- v2 := single[val2&tlMask]
- br[stream].advance(uint8(v.entry))
- br[stream2].advance(uint8(v2.entry))
- buf[stream][off] = uint8(v.entry >> 8)
- buf[stream2][off] = uint8(v2.entry >> 8)
-
- val = br[stream].peekBitsFast(d.actualTableLog)
- val2 = br[stream2].peekBitsFast(d.actualTableLog)
- v = single[val&tlMask]
- v2 = single[val2&tlMask]
- br[stream].advance(uint8(v.entry))
- br[stream2].advance(uint8(v2.entry))
- buf[stream][off+1] = uint8(v.entry >> 8)
- buf[stream2][off+1] = uint8(v2.entry >> 8)
- }
-
- {
- const stream = 2
- const stream2 = 3
- br[stream].fillFast()
- br[stream2].fillFast()
-
- val := br[stream].peekBitsFast(d.actualTableLog)
- val2 := br[stream2].peekBitsFast(d.actualTableLog)
- v := single[val&tlMask]
- v2 := single[val2&tlMask]
- br[stream].advance(uint8(v.entry))
- br[stream2].advance(uint8(v2.entry))
- buf[stream][off] = uint8(v.entry >> 8)
- buf[stream2][off] = uint8(v2.entry >> 8)
-
- val = br[stream].peekBitsFast(d.actualTableLog)
- val2 = br[stream2].peekBitsFast(d.actualTableLog)
- v = single[val&tlMask]
- v2 = single[val2&tlMask]
- br[stream].advance(uint8(v.entry))
- br[stream2].advance(uint8(v2.entry))
- buf[stream][off+1] = uint8(v.entry >> 8)
- buf[stream2][off+1] = uint8(v2.entry >> 8)
- }
-
- off += 2
-
- if off == 0 {
- if bufoff > dstEvery {
- d.bufs.Put(buf)
- return nil, errors.New("corruption detected: stream overrun 1")
- }
- copy(out, buf[0][:])
- copy(out[dstEvery:], buf[1][:])
- copy(out[dstEvery*2:], buf[2][:])
- copy(out[dstEvery*3:], buf[3][:])
- out = out[bufoff:]
- decoded += bufoff * 4
- // There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
- d.bufs.Put(buf)
- return nil, errors.New("corruption detected: stream overrun 2")
- }
- }
- }
- if off > 0 {
- ioff := int(off)
- if len(out) < dstEvery*3+ioff {
- d.bufs.Put(buf)
- return nil, errors.New("corruption detected: stream overrun 3")
- }
- copy(out, buf[0][:off])
- copy(out[dstEvery:], buf[1][:off])
- copy(out[dstEvery*2:], buf[2][:off])
- copy(out[dstEvery*3:], buf[3][:off])
- decoded += int(off) * 4
- out = out[off:]
- }
-
- // Decode remaining.
- remainBytes := dstEvery - (decoded / 4)
- for i := range br {
- offset := dstEvery * i
- endsAt := offset + remainBytes
- if endsAt > len(out) {
- endsAt = len(out)
- }
- br := &br[i]
- bitsLeft := br.remaining()
- for bitsLeft > 0 {
- br.fill()
- if offset >= endsAt {
- d.bufs.Put(buf)
- return nil, errors.New("corruption detected: stream overrun 4")
- }
-
- // Read value and increment offset.
- val := br.peekBitsFast(d.actualTableLog)
- v := single[val&tlMask].entry
- nBits := uint8(v)
- br.advance(nBits)
- bitsLeft -= uint(nBits)
- out[offset] = uint8(v >> 8)
- offset++
- }
- if offset != endsAt {
- d.bufs.Put(buf)
- return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
- }
- decoded += offset - dstEvery*i
- err = br.close()
- if err != nil {
- return nil, err
- }
- }
- d.bufs.Put(buf)
- if dstSize != decoded {
- return nil, errors.New("corruption detected: short output block")
- }
- return dst, nil
-}
-
-// Decompress4X will decompress a 4X encoded stream.
-// The length of the supplied input must match the end of a block exactly.
-// The *capacity* of the dst slice must match the destination size of
-// the uncompressed data exactly.
func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
if d.actualTableLog == 8 {
return d.decompress4X8bitExactly(dst, src)
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s
new file mode 100644
index 000000000..0d6cb1a96
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s
@@ -0,0 +1,488 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+#define bufoff 256 // see decompress.go, we're using [4][256]byte table
+
+// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
+TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
+#define off R8
+#define buffer DI
+#define table SI
+
+#define br_bits_read R9
+#define br_value R10
+#define br_offset R11
+#define peek_bits R12
+#define exhausted DX
+
+#define br0 R13
+#define br1 R14
+#define br2 R15
+#define br3 BP
+
+ MOVQ BP, 0(SP)
+
+ XORQ exhausted, exhausted // exhausted = false
+ XORQ off, off // off = 0
+
+ MOVBQZX peekBits+32(FP), peek_bits
+ MOVQ buf+40(FP), buffer
+ MOVQ tbl+48(FP), table
+
+ MOVQ pbr0+0(FP), br0
+ MOVQ pbr1+8(FP), br1
+ MOVQ pbr2+16(FP), br2
+ MOVQ pbr3+24(FP), br3
+
+main_loop:
+
+ // const stream = 0
+ // br0.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
+ MOVQ bitReaderShifted_value(br0), br_value
+ MOVQ bitReaderShifted_off(br0), br_offset
+
+ // if b.bitsRead >= 32 {
+ CMPQ br_bits_read, $32
+ JB skip_fill0
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br0), AX
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br0.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill0:
+
+ // val0 := br0.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br0.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val1 := br0.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br0.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 0(buffer)(off*1)
+
+ // SECOND PART:
+ // val2 := br0.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v2 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br0.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val3 := br0.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v3 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br0.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off+2] = uint8(v2.entry >> 8)
+ // buf[stream][off+3] = uint8(v3.entry >> 8)
+ MOVW BX, 0+2(buffer)(off*1)
+
+	// update the bit reader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
+ MOVQ br_value, bitReaderShifted_value(br0)
+ MOVQ br_offset, bitReaderShifted_off(br0)
+
+ // const stream = 1
+ // br1.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
+ MOVQ bitReaderShifted_value(br1), br_value
+ MOVQ bitReaderShifted_off(br1), br_offset
+
+ // if b.bitsRead >= 32 {
+ CMPQ br_bits_read, $32
+ JB skip_fill1
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br1), AX
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br1.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill1:
+
+ // val0 := br1.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br1.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val1 := br1.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br1.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 256(buffer)(off*1)
+
+ // SECOND PART:
+ // val2 := br1.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v2 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br1.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val3 := br1.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v3 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br1.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off+2] = uint8(v2.entry >> 8)
+ // buf[stream][off+3] = uint8(v3.entry >> 8)
+ MOVW BX, 256+2(buffer)(off*1)
+
+	// update the bit reader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
+ MOVQ br_value, bitReaderShifted_value(br1)
+ MOVQ br_offset, bitReaderShifted_off(br1)
+
+ // const stream = 2
+ // br2.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
+ MOVQ bitReaderShifted_value(br2), br_value
+ MOVQ bitReaderShifted_off(br2), br_offset
+
+ // if b.bitsRead >= 32 {
+ CMPQ br_bits_read, $32
+ JB skip_fill2
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br2), AX
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br2.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill2:
+
+ // val0 := br2.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br2.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val1 := br2.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br2.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 512(buffer)(off*1)
+
+ // SECOND PART:
+ // val2 := br2.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v2 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br2.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val3 := br2.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v3 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br2.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off+2] = uint8(v2.entry >> 8)
+ // buf[stream][off+3] = uint8(v3.entry >> 8)
+ MOVW BX, 512+2(buffer)(off*1)
+
+	// update the bit reader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
+ MOVQ br_value, bitReaderShifted_value(br2)
+ MOVQ br_offset, bitReaderShifted_off(br2)
+
+ // const stream = 3
+ // br3.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
+ MOVQ bitReaderShifted_value(br3), br_value
+ MOVQ bitReaderShifted_off(br3), br_offset
+
+ // if b.bitsRead >= 32 {
+ CMPQ br_bits_read, $32
+ JB skip_fill3
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br3), AX
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br3.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill3:
+
+ // val0 := br3.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br3.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val1 := br3.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br3.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 768(buffer)(off*1)
+
+ // SECOND PART:
+ // val2 := br3.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v2 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br3.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val3 := br3.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v3 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br3.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off+2] = uint8(v2.entry >> 8)
+ // buf[stream][off+3] = uint8(v3.entry >> 8)
+ MOVW BX, 768+2(buffer)(off*1)
+
+	// update the bit reader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
+ MOVQ br_value, bitReaderShifted_value(br3)
+ MOVQ br_offset, bitReaderShifted_off(br3)
+
+	ADDQ $4, off // off += 4
+
+ TESTB DH, DH // any br[i].ofs < 4?
+ JNZ end
+
+ CMPQ off, $bufoff
+ JL main_loop
+
+end:
+ MOVQ 0(SP), BP
+
+ MOVB off, ret+56(FP)
+ RET
+
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in
new file mode 100644
index 000000000..6d477a2c1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in
@@ -0,0 +1,197 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+
+#define bufoff 256 // see decompress.go, we're using [4][256]byte table
+
+//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
+TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
+#define off R8
+#define buffer DI
+#define table SI
+
+#define br_bits_read R9
+#define br_value R10
+#define br_offset R11
+#define peek_bits R12
+#define exhausted DX
+
+#define br0 R13
+#define br1 R14
+#define br2 R15
+#define br3 BP
+
+ MOVQ BP, 0(SP)
+
+ XORQ exhausted, exhausted // exhausted = false
+ XORQ off, off // off = 0
+
+ MOVBQZX peekBits+32(FP), peek_bits
+ MOVQ buf+40(FP), buffer
+ MOVQ tbl+48(FP), table
+
+ MOVQ pbr0+0(FP), br0
+ MOVQ pbr1+8(FP), br1
+ MOVQ pbr2+16(FP), br2
+ MOVQ pbr3+24(FP), br3
+
+main_loop:
+{{ define "decode_2_values_x86" }}
+ // const stream = {{ var "id" }}
+ // br{{ var "id"}}.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
+ MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
+ MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
+
+ // if b.bitsRead >= 32 {
+ CMPQ br_bits_read, $32
+ JB skip_fill{{ var "id" }}
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br{{ var "id"}}.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+ // }
+skip_fill{{ var "id" }}:
+
+ // val0 := br{{ var "id"}}.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br{{ var "id"}}.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val1 := br{{ var "id"}}.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br{{ var "id"}}.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
+
+ // SECOND PART:
+ // val2 := br{{ var "id"}}.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v2 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br{{ var "id"}}.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val3 := br{{ var "id"}}.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v3 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br{{ var "id"}}.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+
+ // these two writes get coalesced
+ // buf[stream][off+2] = uint8(v2.entry >> 8)
+ // buf[stream][off+3] = uint8(v3.entry >> 8)
+ MOVW BX, {{ var "bufofs" }}+2(buffer)(off*1)
+
+	// update the bit reader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
+ MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
+ MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
+{{ end }}
+
+ {{ set "id" "0" }}
+ {{ set "ofs" "0" }}
+ {{ set "bufofs" "0" }} {{/* id * bufoff */}}
+ {{ template "decode_2_values_x86" . }}
+
+ {{ set "id" "1" }}
+ {{ set "ofs" "8" }}
+ {{ set "bufofs" "256" }}
+ {{ template "decode_2_values_x86" . }}
+
+ {{ set "id" "2" }}
+ {{ set "ofs" "16" }}
+ {{ set "bufofs" "512" }}
+ {{ template "decode_2_values_x86" . }}
+
+ {{ set "id" "3" }}
+ {{ set "ofs" "24" }}
+ {{ set "bufofs" "768" }}
+ {{ template "decode_2_values_x86" . }}
+
+	ADDQ $4, off // off += 4
+
+ TESTB DH, DH // any br[i].ofs < 4?
+ JNZ end
+
+ CMPQ off, $bufoff
+ JL main_loop
+end:
+ MOVQ 0(SP), BP
+
+ MOVB off, ret+56(FP)
+ RET
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
new file mode 100644
index 000000000..d47f6644f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -0,0 +1,181 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// This file contains the specialisation of Decoder.Decompress4X
+// that uses an asm implementation of its main loop.
+package huff0
+
+import (
+ "errors"
+ "fmt"
+)
+
+// decompress4x_main_loop_x86 is an x86 assembler implementation
+// of Decompress4X when tablelog > 8.
+// go:noescape
+func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+ peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+
+// decompress4x_8b_loop_x86 is an x86 assembler implementation
+// of Decompress4X when tablelog <= 8, which decodes 4 entries
+// per loop.
+// go:noescape
+func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+ peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+
+// fallback8BitSize is the size below which using the Go version is faster.
+const fallback8BitSize = 800
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ if len(src) < 6+(4*1) {
+ return nil, errors.New("input too small")
+ }
+
+ use8BitTables := d.actualTableLog <= 8
+ if cap(dst) < fallback8BitSize && use8BitTables {
+ return d.decompress4X8bit(dst, src)
+ }
+ var br [4]bitReaderShifted
+ // Decode "jump table"
+ start := 6
+ for i := 0; i < 3; i++ {
+ length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+ if start+length >= len(src) {
+ return nil, errors.New("truncated input (or invalid offset)")
+ }
+ err := br[i].init(src[start : start+length])
+ if err != nil {
+ return nil, err
+ }
+ start += length
+ }
+ err := br[3].init(src[start:])
+ if err != nil {
+ return nil, err
+ }
+
+ // destination, offset to match first output
+ dstSize := cap(dst)
+ dst = dst[:dstSize]
+ out := dst
+ dstEvery := (dstSize + 3) / 4
+
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+ single := d.dt.single[:tlSize]
+
+ // Use temp table to avoid bound checks/append penalty.
+ buf := d.buffer()
+ var off uint8
+ var decoded int
+
+ const debug = false
+
+ // see: bitReaderShifted.peekBitsFast()
+ peekBits := uint8((64 - d.actualTableLog) & 63)
+
+ // Decode 2 values from each decoder/loop.
+ const bufoff = 256
+ for {
+ if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+ break
+ }
+
+ if use8BitTables {
+ off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
+ } else {
+ off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
+ }
+ if debug {
+ fmt.Print("DEBUG: ")
+ fmt.Printf("off=%d,", off)
+ for i := 0; i < 4; i++ {
+ fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}",
+ i, br[i].bitsRead, br[i].value, br[i].off)
+ }
+ fmt.Println("")
+ }
+
+ if off != 0 {
+ break
+ }
+
+ if bufoff > dstEvery {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 1")
+ }
+ copy(out, buf[0][:])
+ copy(out[dstEvery:], buf[1][:])
+ copy(out[dstEvery*2:], buf[2][:])
+ copy(out[dstEvery*3:], buf[3][:])
+ out = out[bufoff:]
+ decoded += bufoff * 4
+ // There must at least be 3 buffers left.
+ if len(out) < dstEvery*3 {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 2")
+ }
+ }
+ if off > 0 {
+ ioff := int(off)
+ if len(out) < dstEvery*3+ioff {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 3")
+ }
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
+ decoded += int(off) * 4
+ out = out[off:]
+ }
+
+ // Decode remaining.
+ remainBytes := dstEvery - (decoded / 4)
+ for i := range br {
+ offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
+ br := &br[i]
+ bitsLeft := br.remaining()
+ for bitsLeft > 0 {
+ br.fill()
+ if offset >= endsAt {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 4")
+ }
+
+ // Read value and increment offset.
+ val := br.peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask].entry
+ nBits := uint8(v)
+ br.advance(nBits)
+ bitsLeft -= uint(nBits)
+ out[offset] = uint8(v >> 8)
+ offset++
+ }
+ if offset != endsAt {
+ d.bufs.Put(buf)
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
+ decoded += offset - dstEvery*i
+ err = br.close()
+ if err != nil {
+ return nil, err
+ }
+ }
+ d.bufs.Put(buf)
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
+ return dst, nil
+}
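A hedged round-trip sketch of the capacity requirement in the Decompress4X doc comment: huff0 streams do not record the uncompressed size, so the caller must track it and size dst's capacity to exactly that value. Compress4X, ReadTable, and Scratch.Decoder are the package's public API; the payload and sizes here are made up.

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/huff0"
)

func main() {
	// Illustrative, compressible payload; real callers already know len(in).
	in := bytes.Repeat([]byte("huff0 example payload "), 100)

	// Compress; the output begins with the serialized Huffman table.
	comp, _, err := huff0.Compress4X(in, nil)
	if err != nil {
		log.Fatal(err)
	}

	// ReadTable consumes the table and returns the remaining compressed data.
	s, remain, err := huff0.ReadTable(comp, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Decompress4X requires cap(dst) to equal the uncompressed size exactly.
	dst := make([]byte, 0, len(in))
	out, err := s.Decoder().Decompress4X(dst, remain)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(out, in)) // true
}
```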
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
new file mode 100644
index 000000000..2edad3ea5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
@@ -0,0 +1,506 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+#ifdef GOAMD64_v4
+#ifndef GOAMD64_v3
+#define GOAMD64_v3
+#endif
+#endif
+
+#define bufoff 256 // see decompress.go, we're using [4][256]byte table
+
+// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
+TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
+#define off R8
+#define buffer DI
+#define table SI
+
+#define br_bits_read R9
+#define br_value R10
+#define br_offset R11
+#define peek_bits R12
+#define exhausted DX
+
+#define br0 R13
+#define br1 R14
+#define br2 R15
+#define br3 BP
+
+ MOVQ BP, 0(SP)
+
+ XORQ exhausted, exhausted // exhausted = false
+ XORQ off, off // off = 0
+
+ MOVBQZX peekBits+32(FP), peek_bits
+ MOVQ buf+40(FP), buffer
+ MOVQ tbl+48(FP), table
+
+ MOVQ pbr0+0(FP), br0
+ MOVQ pbr1+8(FP), br1
+ MOVQ pbr2+16(FP), br2
+ MOVQ pbr3+24(FP), br3
+
+main_loop:
+
+ // const stream = 0
+ // br0.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
+ MOVQ bitReaderShifted_value(br0), br_value
+ MOVQ bitReaderShifted_off(br0), br_offset
+
+ // We must have at least 2 * max tablelog left
+ CMPQ br_bits_read, $64-22
+ JBE skip_fill0
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br0), AX
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+ SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+
+#else
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+
+#endif
+
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br0.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill0:
+
+ // val0 := br0.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br0.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ // val1 := br0.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br0.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 0(buffer)(off*1)
+
+	// update the bit reader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
+ MOVQ br_value, bitReaderShifted_value(br0)
+ MOVQ br_offset, bitReaderShifted_off(br0)
+
+ // const stream = 1
+ // br1.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
+ MOVQ bitReaderShifted_value(br1), br_value
+ MOVQ bitReaderShifted_off(br1), br_offset
+
+ // We must have at least 2 * max tablelog left
+ CMPQ br_bits_read, $64-22
+ JBE skip_fill1
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br1), AX
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+ SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+
+#else
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+
+#endif
+
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br1.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill1:
+
+ // val0 := br1.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br1.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ // val1 := br1.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br1.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 256(buffer)(off*1)
+
+	// update the bit reader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
+ MOVQ br_value, bitReaderShifted_value(br1)
+ MOVQ br_offset, bitReaderShifted_off(br1)
+
+ // const stream = 2
+ // br2.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
+ MOVQ bitReaderShifted_value(br2), br_value
+ MOVQ bitReaderShifted_off(br2), br_offset
+
+ // We must have at least 2 * max tablelog left
+ CMPQ br_bits_read, $64-22
+ JBE skip_fill2
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br2), AX
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+ SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+
+#else
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+
+#endif
+
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br2.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill2:
+
+ // val0 := br2.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br2.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ // val1 := br2.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br2.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 512(buffer)(off*1)
+
+	// update the bit reader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
+ MOVQ br_value, bitReaderShifted_value(br2)
+ MOVQ br_offset, bitReaderShifted_off(br2)
+
+ // const stream = 3
+ // br3.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
+ MOVQ bitReaderShifted_value(br3), br_value
+ MOVQ bitReaderShifted_off(br3), br_offset
+
+ // We must have at least 2 * max tablelog left
+ CMPQ br_bits_read, $64-22
+ JBE skip_fill3
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br3), AX
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+ SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+
+#else
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+
+#endif
+
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br3.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill3:
+
+ // val0 := br3.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br3.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ // val1 := br3.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br3.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 768(buffer)(off*1)
+
+	// update the bit reader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
+ MOVQ br_value, bitReaderShifted_value(br3)
+ MOVQ br_offset, bitReaderShifted_off(br3)
+
+ ADDQ $2, off // off += 2
+
+ TESTB DH, DH // any br[i].ofs < 4?
+ JNZ end
+
+ CMPQ off, $bufoff
+ JL main_loop
+
+end:
+ MOVQ 0(SP), BP
+
+ MOVB off, ret+56(FP)
+ RET
+
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in
new file mode 100644
index 000000000..330d86ae1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in
@@ -0,0 +1,195 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+#ifdef GOAMD64_v4
+#ifndef GOAMD64_v3
+#define GOAMD64_v3
+#endif
+#endif
+
+#define bufoff 256 // see decompress.go, we're using [4][256]byte table
+
+//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
+TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
+#define off R8
+#define buffer DI
+#define table SI
+
+#define br_bits_read R9
+#define br_value R10
+#define br_offset R11
+#define peek_bits R12
+#define exhausted DX
+
+#define br0 R13
+#define br1 R14
+#define br2 R15
+#define br3 BP
+
+ MOVQ BP, 0(SP)
+
+ XORQ exhausted, exhausted // exhausted = false
+ XORQ off, off // off = 0
+
+ MOVBQZX peekBits+32(FP), peek_bits
+ MOVQ buf+40(FP), buffer
+ MOVQ tbl+48(FP), table
+
+ MOVQ pbr0+0(FP), br0
+ MOVQ pbr1+8(FP), br1
+ MOVQ pbr2+16(FP), br2
+ MOVQ pbr3+24(FP), br3
+
+main_loop:
+{{ define "decode_2_values_x86" }}
+ // const stream = {{ var "id" }}
+ // br{{ var "id"}}.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
+ MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
+ MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
+
+ // We must have at least 2 * max tablelog left
+ CMPQ br_bits_read, $64-22
+ JBE skip_fill{{ var "id" }}
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+ SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+#else
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+#endif
+
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br{{ var "id"}}.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+ // }
+skip_fill{{ var "id" }}:
+
+ // val0 := br{{ var "id"}}.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+#else
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+#endif
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br{{ var "id"}}.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+#else
+ // val1 := br{{ var "id"}}.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+#endif
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br{{ var "id"}}.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
+
+	// update the bit reader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
+ MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
+ MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
+{{ end }}
+
+ {{ set "id" "0" }}
+ {{ set "ofs" "0" }}
+ {{ set "bufofs" "0" }} {{/* id * bufoff */}}
+ {{ template "decode_2_values_x86" . }}
+
+ {{ set "id" "1" }}
+ {{ set "ofs" "8" }}
+ {{ set "bufofs" "256" }}
+ {{ template "decode_2_values_x86" . }}
+
+ {{ set "id" "2" }}
+ {{ set "ofs" "16" }}
+ {{ set "bufofs" "512" }}
+ {{ template "decode_2_values_x86" . }}
+
+ {{ set "id" "3" }}
+ {{ set "ofs" "24" }}
+ {{ set "bufofs" "768" }}
+ {{ template "decode_2_values_x86" . }}
+
+ ADDQ $2, off // off += 2
+
+ TESTB DH, DH // any br[i].ofs < 4?
+ JNZ end
+
+ CMPQ off, $bufoff
+ JL main_loop
+end:
+ MOVQ 0(SP), BP
+
+ MOVB off, ret+56(FP)
+ RET
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
new file mode 100644
index 000000000..126b4d68a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
@@ -0,0 +1,193 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+// This file contains a generic implementation of Decoder.Decompress4X.
+package huff0
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ if len(src) < 6+(4*1) {
+ return nil, errors.New("input too small")
+ }
+ if use8BitTables && d.actualTableLog <= 8 {
+ return d.decompress4X8bit(dst, src)
+ }
+
+ var br [4]bitReaderShifted
+ // Decode "jump table"
+ start := 6
+ for i := 0; i < 3; i++ {
+ length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+ if start+length >= len(src) {
+ return nil, errors.New("truncated input (or invalid offset)")
+ }
+ err := br[i].init(src[start : start+length])
+ if err != nil {
+ return nil, err
+ }
+ start += length
+ }
+ err := br[3].init(src[start:])
+ if err != nil {
+ return nil, err
+ }
+
+ // destination, offset to match first output
+ dstSize := cap(dst)
+ dst = dst[:dstSize]
+ out := dst
+ dstEvery := (dstSize + 3) / 4
+
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+ single := d.dt.single[:tlSize]
+
+ // Use temp table to avoid bound checks/append penalty.
+ buf := d.buffer()
+ var off uint8
+ var decoded int
+
+ // Decode 2 values from each decoder/loop.
+ const bufoff = 256
+ for {
+ if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+ break
+ }
+
+ {
+ const stream = 0
+ const stream2 = 1
+ br[stream].fillFast()
+ br[stream2].fillFast()
+
+ val := br[stream].peekBitsFast(d.actualTableLog)
+ val2 := br[stream2].peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask]
+ v2 := single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off] = uint8(v.entry >> 8)
+ buf[stream2][off] = uint8(v2.entry >> 8)
+
+ val = br[stream].peekBitsFast(d.actualTableLog)
+ val2 = br[stream2].peekBitsFast(d.actualTableLog)
+ v = single[val&tlMask]
+ v2 = single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off+1] = uint8(v.entry >> 8)
+ buf[stream2][off+1] = uint8(v2.entry >> 8)
+ }
+
+ {
+ const stream = 2
+ const stream2 = 3
+ br[stream].fillFast()
+ br[stream2].fillFast()
+
+ val := br[stream].peekBitsFast(d.actualTableLog)
+ val2 := br[stream2].peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask]
+ v2 := single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off] = uint8(v.entry >> 8)
+ buf[stream2][off] = uint8(v2.entry >> 8)
+
+ val = br[stream].peekBitsFast(d.actualTableLog)
+ val2 = br[stream2].peekBitsFast(d.actualTableLog)
+ v = single[val&tlMask]
+ v2 = single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off+1] = uint8(v.entry >> 8)
+ buf[stream2][off+1] = uint8(v2.entry >> 8)
+ }
+
+ off += 2
+
+ if off == 0 {
+ if bufoff > dstEvery {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 1")
+ }
+ copy(out, buf[0][:])
+ copy(out[dstEvery:], buf[1][:])
+ copy(out[dstEvery*2:], buf[2][:])
+ copy(out[dstEvery*3:], buf[3][:])
+ out = out[bufoff:]
+ decoded += bufoff * 4
+ // There must at least be 3 buffers left.
+ if len(out) < dstEvery*3 {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 2")
+ }
+ }
+ }
+ if off > 0 {
+ ioff := int(off)
+ if len(out) < dstEvery*3+ioff {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 3")
+ }
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
+ decoded += int(off) * 4
+ out = out[off:]
+ }
+
+ // Decode remaining.
+ remainBytes := dstEvery - (decoded / 4)
+ for i := range br {
+ offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
+ br := &br[i]
+ bitsLeft := br.remaining()
+ for bitsLeft > 0 {
+ br.fill()
+ if offset >= endsAt {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 4")
+ }
+
+ // Read value and increment offset.
+ val := br.peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask].entry
+ nBits := uint8(v)
+ br.advance(nBits)
+ bitsLeft -= uint(nBits)
+ out[offset] = uint8(v >> 8)
+ offset++
+ }
+ if offset != endsAt {
+ d.bufs.Put(buf)
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
+ decoded += offset - dstEvery*i
+ err = br.close()
+ if err != nil {
+ return nil, err
+ }
+ }
+ d.bufs.Put(buf)
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
+ return dst, nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index c876c591a..e3445ac19 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -153,10 +153,10 @@ http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
This package:
file out level insize outsize millis mb/s
-silesia.tar zskp 1 211947520 73101992 643 313.87
-silesia.tar zskp 2 211947520 67504318 969 208.38
-silesia.tar zskp 3 211947520 64595893 2007 100.68
-silesia.tar zskp 4 211947520 60995370 8825 22.90
+silesia.tar zskp 1 211947520 73821326 634 318.47
+silesia.tar zskp 2 211947520 67655404 1508 133.96
+silesia.tar zskp 3 211947520 64746933 3000 67.37
+silesia.tar zskp 4 211947520 60073508 16926 11.94
cgo zstd:
silesia.tar zstd 1 211947520 73605392 543 371.56
@@ -165,94 +165,94 @@ silesia.tar zstd 6 211947520 62916450 1913 105.66
silesia.tar zstd 9 211947520 60212393 5063 39.92
gzip, stdlib/this package:
-silesia.tar gzstd 1 211947520 80007735 1654 122.21
-silesia.tar gzkp 1 211947520 80136201 1152 175.45
+silesia.tar gzstd 1 211947520 80007735 1498 134.87
+silesia.tar gzkp 1 211947520 80088272 1009 200.31
GOB stream of binary data. Highly compressible.
https://files.klauspost.com/compress/gob-stream.7z
file out level insize outsize millis mb/s
-gob-stream zskp 1 1911399616 235022249 3088 590.30
-gob-stream zskp 2 1911399616 205669791 3786 481.34
-gob-stream zskp 3 1911399616 175034659 9636 189.17
-gob-stream zskp 4 1911399616 165609838 50369 36.19
+gob-stream zskp 1 1911399616 233948096 3230 564.34
+gob-stream zskp 2 1911399616 203997694 4997 364.73
+gob-stream zskp 3 1911399616 173526523 13435 135.68
+gob-stream zskp 4 1911399616 162195235 47559 38.33
gob-stream zstd 1 1911399616 249810424 2637 691.26
gob-stream zstd 3 1911399616 208192146 3490 522.31
gob-stream zstd 6 1911399616 193632038 6687 272.56
gob-stream zstd 9 1911399616 177620386 16175 112.70
-gob-stream gzstd 1 1911399616 357382641 10251 177.82
-gob-stream gzkp 1 1911399616 359753026 5438 335.20
+gob-stream gzstd 1 1911399616 357382013 9046 201.49
+gob-stream gzkp 1 1911399616 359136669 4885 373.08
The test data for the Large Text Compression Benchmark is the first
10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
http://mattmahoney.net/dc/textdata.html
file out level insize outsize millis mb/s
-enwik9 zskp 1 1000000000 343848582 3609 264.18
-enwik9 zskp 2 1000000000 317276632 5746 165.97
-enwik9 zskp 3 1000000000 292243069 12162 78.41
-enwik9 zskp 4 1000000000 262183768 82837 11.51
+enwik9 zskp 1 1000000000 343833605 3687 258.64
+enwik9 zskp 2 1000000000 317001237 7672 124.29
+enwik9 zskp 3 1000000000 291915823 15923 59.89
+enwik9 zskp 4 1000000000 261710291 77697 12.27
enwik9 zstd 1 1000000000 358072021 3110 306.65
enwik9 zstd 3 1000000000 313734672 4784 199.35
enwik9 zstd 6 1000000000 295138875 10290 92.68
enwik9 zstd 9 1000000000 278348700 28549 33.40
-enwik9 gzstd 1 1000000000 382578136 9604 99.30
-enwik9 gzkp 1 1000000000 383825945 6544 145.73
+enwik9 gzstd 1 1000000000 382578136 8608 110.78
+enwik9 gzkp 1 1000000000 382781160 5628 169.45
Highly compressible JSON file.
https://files.klauspost.com/compress/github-june-2days-2019.json.zst
file out level insize outsize millis mb/s
-github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40
-github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96
-github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75
-github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16
+github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17
+github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49
+github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41
+github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18
github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
-github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79
-github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61
+github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32
+github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16
VM Image, Linux mint with a few installed applications:
https://files.klauspost.com/compress/rawstudio-mint14.7z
file out level insize outsize millis mb/s
-rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84
-rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07
-rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08
-rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52
+rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29
+rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15
+rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49
+rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41
rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
-rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40
-rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92
+rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96
+rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26
CSV data:
https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
file out level insize outsize millis mb/s
-nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35
-nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44
-nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66
-nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33
+nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17
+nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50
+nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79
+nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98
nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
-nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
-nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00
+nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11
+nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
```
## Decompressor
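
The zskp 1-4 rows in the benchmark tables above map to the package's four encoder levels (SpeedFastest, SpeedDefault, SpeedBetterCompression, SpeedBestCompression). A minimal sketch of picking one; the stdin/stdout plumbing is illustrative:

```go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// SpeedDefault corresponds to the "zskp 2" rows above.
	enc, err := zstd.NewWriter(os.Stdout, zstd.WithEncoderLevel(zstd.SpeedDefault))
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(enc, os.Stdin); err != nil {
		enc.Close()
		panic(err)
	}
	if err := enc.Close(); err != nil { // Close flushes the final frame
		panic(err)
	}
}
```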
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 607b62ee3..7d567a54a 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -167,6 +167,11 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
}
return ErrCompressedSizeTooBig
}
+ // Empty compressed blocks must be at least 2 bytes:
+ // one for Literals_Block_Type and one for Sequences_Section_Header.
+ if cSize < 2 {
+ return ErrBlockTooSmall
+ }
case blockTypeRaw:
if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
if debugDecoder {
@@ -491,6 +496,9 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
+ if debugDecoder {
+ printf("prepareSequences: %d byte(s) input\n", len(in))
+ }
// Decode Sequences
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
if len(in) < 1 {
@@ -499,8 +507,6 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
var nSeqs int
seqHeader := in[0]
switch {
- case seqHeader == 0:
- in = in[1:]
case seqHeader < 128:
nSeqs = int(seqHeader)
in = in[1:]
@@ -517,6 +523,13 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
in = in[3:]
}
+ if nSeqs == 0 && len(in) != 0 {
+ // When there are no sequences, there should not be any more data...
+ if debugDecoder {
+ printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in))
+ }
+ return ErrUnexpectedBlockSize
+ }
var seqs = &hist.decoders
seqs.nSeqs = nSeqs
@@ -635,6 +648,7 @@ func (b *blockDec) decodeSequences(hist *history) error {
hist.decoders.seqSize = len(hist.decoders.literals)
return nil
}
+ hist.decoders.windowSize = hist.windowSize
hist.decoders.prevOffset = hist.recentOffsets
err := hist.decoders.decode(b.sequence)
hist.recentOffsets = hist.decoders.prevOffset
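
The stricter checks added here (ErrBlockTooSmall for a compressed block shorter than two bytes, ErrUnexpectedBlockSize when a zero-sequence section still has trailing data) surface to callers as ordinary decode errors. A hedged sketch of detecting them; corruptFrame is a placeholder, not a real test vector:

```go
import (
	"errors"

	"github.com/klauspost/compress/zstd"
)

// rejectedByBlockChecks reports whether decoding failed on the new block validation.
func rejectedByBlockChecks(corruptFrame []byte) bool {
	dec, err := zstd.NewReader(nil) // nil reader is fine when only DecodeAll is used
	if err != nil {
		return false
	}
	defer dec.Close()

	_, err = dec.DecodeAll(corruptFrame, nil)
	return errors.Is(err, zstd.ErrBlockTooSmall) || errors.Is(err, zstd.ErrUnexpectedBlockSize)
}
```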
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index a93dfaf10..9fcdaac1d 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -348,10 +348,10 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
frame.history.setDict(&dict)
}
- if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+ if frame.FrameContentSize != fcsUnknown && frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
return dst, ErrDecoderSizeExceeded
}
- if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
+ if frame.FrameContentSize < 1<<30 {
// Never preallocate more than 1 GB up front.
if cap(dst)-len(dst) < int(frame.FrameContentSize) {
dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
@@ -514,7 +514,7 @@ func (d *Decoder) nextBlockSync() (ok bool) {
// Check frame size (before CRC)
d.syncStream.decodedFrame += uint64(len(d.current.b))
- if d.frame.FrameContentSize > 0 && d.syncStream.decodedFrame > d.frame.FrameContentSize {
+ if d.syncStream.decodedFrame > d.frame.FrameContentSize {
if debugDecoder {
printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
}
@@ -523,7 +523,7 @@ func (d *Decoder) nextBlockSync() (ok bool) {
}
// Check FCS
- if d.current.d.Last && d.frame.FrameContentSize > 0 && d.syncStream.decodedFrame != d.frame.FrameContentSize {
+ if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize {
if debugDecoder {
printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
}
@@ -700,6 +700,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
}
hist.decoders = block.async.newHist.decoders
hist.recentOffsets = block.async.newHist.recentOffsets
+ hist.windowSize = block.async.newHist.windowSize
if block.async.newHist.dict != nil {
hist.setDict(block.async.newHist.dict)
}
@@ -811,11 +812,11 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
}
if !hasErr {
decodedFrame += uint64(len(do.b))
- if fcs > 0 && decodedFrame > fcs {
+ if decodedFrame > fcs {
println("fcs exceeded", block.Last, fcs, decodedFrame)
do.err = ErrFrameSizeExceeded
hasErr = true
- } else if block.Last && fcs > 0 && decodedFrame != fcs {
+ } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs {
do.err = ErrFrameSizeMismatch
hasErr = true
} else {
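
With FrameContentSize now defaulting to the fcsUnknown sentinel instead of 0, frames that declare no size are no longer conflated with empty frames, and the pre-allocation above only triggers for known sizes under 1 GB. Callers decoding untrusted data can still bound memory explicitly; a hedged sketch, with the 64 MiB cap being illustrative:

```go
import "github.com/klauspost/compress/zstd"

// decodeBounded decodes untrusted input with a hard output cap. The decoder
// returns ErrDecoderSizeExceeded when the cap is hit, even for frames that
// declare no content size.
func decodeBounded(untrusted []byte) ([]byte, error) {
	dec, err := zstd.NewReader(nil,
		zstd.WithDecoderMaxMemory(64<<20),
		zstd.WithDecoderConcurrency(1),
	)
	if err != nil {
		return nil, err
	}
	defer dec.Close()
	return dec.DecodeAll(untrusted, nil)
}
```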
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 29c3176b0..11089d223 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -197,7 +197,7 @@ func (d *frameDec) reset(br byteBuffer) error {
default:
fcsSize = 1 << v
}
- d.FrameContentSize = 0
+ d.FrameContentSize = fcsUnknown
if fcsSize > 0 {
b, err := br.readSmall(fcsSize)
if err != nil {
@@ -343,12 +343,7 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
err = ErrDecoderSizeExceeded
break
}
- if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize {
- println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize)
- err = ErrFrameSizeExceeded
- break
- }
- if d.FrameContentSize > 0 && uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
+ if uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize)
err = ErrFrameSizeExceeded
break
@@ -356,13 +351,13 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if dec.Last {
break
}
- if debugDecoder && d.FrameContentSize > 0 {
+ if debugDecoder {
println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize)
}
}
dst = d.history.b
if err == nil {
- if d.FrameContentSize > 0 && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
+ if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
err = ErrFrameSizeMismatch
} else if d.HasCheckSum {
var n int
diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz.go b/vendor/github.com/klauspost/compress/zstd/fuzz.go
index fda8a7422..7f2210e05 100644
--- a/vendor/github.com/klauspost/compress/zstd/fuzz.go
+++ b/vendor/github.com/klauspost/compress/zstd/fuzz.go
@@ -1,5 +1,5 @@
-//go:build gofuzz
-// +build gofuzz
+//go:build ignorecrc
+// +build ignorecrc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz_none.go b/vendor/github.com/klauspost/compress/zstd/fuzz_none.go
index 0515b201c..6811c68a8 100644
--- a/vendor/github.com/klauspost/compress/zstd/fuzz_none.go
+++ b/vendor/github.com/klauspost/compress/zstd/fuzz_none.go
@@ -1,5 +1,5 @@
-//go:build !gofuzz
-// +build !gofuzz
+//go:build !ignorecrc
+// +build !ignorecrc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index 213736ad7..819f1461b 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -107,7 +107,10 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
s.seqSize = 0
litRemain := len(s.literals)
-
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
for i := range seqs {
var ll, mo, ml int
if br.off > 4+((maxOffsetBits+16+16)>>3) {
@@ -192,7 +195,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
}
s.seqSize += ll + ml
if s.seqSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size", s.seqSize)
+ return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
litRemain -= ll
if litRemain < 0 {
@@ -230,7 +233,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
}
s.seqSize += litRemain
if s.seqSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size", s.seqSize)
+ return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
err := br.close()
if err != nil {
@@ -347,6 +350,10 @@ func (s *sequenceDecs) decodeSync(history *history) error {
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
hist := history.b[history.ignoreBuffer:]
out := s.out
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
for i := seqs - 1; i >= 0; i-- {
if br.overread() {
@@ -426,7 +433,7 @@ func (s *sequenceDecs) decodeSync(history *history) error {
}
size := ll + ml + len(out)
if size-startSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size", size)
+ return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
}
if size > cap(out) {
// Not enough size, which can happen under high volume block streaming conditions
@@ -535,6 +542,11 @@ func (s *sequenceDecs) decodeSync(history *history) error {
}
}
+ // Check if space for literals
+ if len(s.literals)+len(s.out)-startSize > maxBlockSize {
+ return fmt.Errorf("output (%d) bigger than max block size (%d)", len(s.out), maxBlockSize)
+ }
+
// Add final literals
s.out = append(out, s.literals...)
return br.close()
diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go
index 967f29b31..ffffcbc25 100644
--- a/vendor/github.com/klauspost/compress/zstd/zip.go
+++ b/vendor/github.com/klauspost/compress/zstd/zip.go
@@ -20,7 +20,7 @@ const ZipMethodPKWare = 20
var zipReaderPool sync.Pool
-// newZipReader cannot be used since we would leak goroutines...
+// newZipReader creates a pooled zip decompressor.
func newZipReader(r io.Reader) io.ReadCloser {
dec, ok := zipReaderPool.Get().(*Decoder)
if ok {
@@ -44,10 +44,14 @@ func (r *pooledZipReader) Read(p []byte) (n int, err error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.dec == nil {
- return 0, errors.New("Read after Close")
+ return 0, errors.New("read after close or EOF")
}
dec, err := r.dec.Read(p)
-
+ if err == io.EOF {
+ err = r.dec.Reset(nil)
+ zipReaderPool.Put(r.dec)
+ r.dec = nil
+ }
return dec, err
}
@@ -112,11 +116,5 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
// ZipDecompressor returns a decompressor that can be registered with zip libraries.
// See ZipCompressor for example.
func ZipDecompressor() func(r io.Reader) io.ReadCloser {
- return func(r io.Reader) io.ReadCloser {
- d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
- if err != nil {
- panic(err)
- }
- return d.IOReadCloser()
- }
+ return newZipReader
}
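
ZipDecompressor now hands back the pooled newZipReader instead of building a fresh Decoder per call, and the pooled reader recycles its Decoder on EOF. Registration with archive/zip is unchanged; a hedged sketch in which the file and entry names are illustrative:

```go
import (
	"archive/zip"
	"os"

	"github.com/klauspost/compress/zstd"
)

// writeZstdZip writes one zstd-compressed entry using the WinZip method ID.
func writeZstdZip(path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	zw := zip.NewWriter(f)
	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())
	w, err := zw.CreateHeader(&zip.FileHeader{Name: "data.bin", Method: zstd.ZipMethodWinZip})
	if err != nil {
		return err
	}
	if _, err := w.Write([]byte("hello")); err != nil {
		return err
	}
	return zw.Close()
}
```

On the read side, zip.Reader.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor()) is the matching call and now benefits from the decoder pool.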
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index 0b0c2571d..c1c90b4a0 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -39,6 +39,9 @@ const zstdMinMatch = 3
// Reset the buffer offset when reaching this.
const bufferReset = math.MaxInt32 - MaxWindowSize
+// fcsUnknown is used for unknown frame content size.
+const fcsUnknown = math.MaxUint64
+
var (
// ErrReservedBlockType is returned when a reserved block type is found.
// Typically this indicates wrong or corrupted input.
@@ -52,6 +55,10 @@ var (
// Typically returned on invalid input.
ErrBlockTooSmall = errors.New("block too small")
+ // ErrUnexpectedBlockSize is returned when a block has unexpected size.
+ // Typically returned on invalid input.
+ ErrUnexpectedBlockSize = errors.New("unexpected block size")
+
// ErrMagicMismatch is returned when a "magic" number isn't what is expected.
// Typically this indicates wrong or corrupted input.
ErrMagicMismatch = errors.New("invalid input: magic number mismatch")
diff --git a/vendor/github.com/miekg/pkcs11/.travis.yml b/vendor/github.com/miekg/pkcs11/.travis.yml
deleted file mode 100644
index 687044d83..000000000
--- a/vendor/github.com/miekg/pkcs11/.travis.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-language: go
-sudo: required
-dist: trusty
-
-go:
- - 1.9
- - tip
-
-script:
- - go test -v ./...
-
-before_script:
- - sudo apt-get update
- - sudo apt-get -y install libsofthsm
diff --git a/vendor/github.com/miekg/pkcs11/README.md b/vendor/github.com/miekg/pkcs11/README.md
index 0a5c1b7b6..18a361a99 100644
--- a/vendor/github.com/miekg/pkcs11/README.md
+++ b/vendor/github.com/miekg/pkcs11/README.md
@@ -1,6 +1,6 @@
-# PKCS#11 [![Build Status](https://travis-ci.org/miekg/pkcs11.png?branch=master)](https://travis-ci.org/miekg/pkcs11) [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/miekg/pkcs11)
+# PKCS#11
-This is a Go implementation of the PKCS#11 API. It wraps the library closely, but uses Go idiom were
+This is a Go implementation of the PKCS#11 API. It wraps the library closely, but uses Go idiom where
it makes sense. It has been tested with SoftHSM.
## SoftHSM
@@ -13,10 +13,10 @@ it makes sense. It has been tested with SoftHSM.
softhsm --init-token --slot 0 --label test --pin 1234
~~~
- * Then use `libsofthsm.so` as the pkcs11 module:
+ * Then use `libsofthsm2.so` as the pkcs11 module:
~~~ go
- p := pkcs11.New("/usr/lib/softhsm/libsofthsm.so")
+ p := pkcs11.New("/usr/lib/softhsm/libsofthsm2.so")
~~~
## Examples
@@ -24,7 +24,7 @@ it makes sense. It has been tested with SoftHSM.
A skeleton program would look somewhat like this (yes, pkcs#11 is verbose):
~~~ go
-p := pkcs11.New("/usr/lib/softhsm/libsofthsm.so")
+p := pkcs11.New("/usr/lib/softhsm/libsofthsm2.so")
err := p.Initialize()
if err != nil {
panic(err)
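
The hunk ends inside the README's skeleton; picking up after the error check, a hedged continuation using calls that exist in this package (slot 0, the "1234" PIN from the SoftHSM step above, and the SHA-1 digest are illustrative; fmt is assumed to be imported):

```go
defer p.Destroy()
defer p.Finalize()

slots, err := p.GetSlotList(true)
if err != nil {
	panic(err)
}
session, err := p.OpenSession(slots[0], pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
if err != nil {
	panic(err)
}
defer p.CloseSession(session)

if err := p.Login(session, pkcs11.CKU_USER, "1234"); err != nil {
	panic(err)
}
defer p.Logout(session)

if err := p.DigestInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_SHA_1, nil)}); err != nil {
	panic(err)
}
hash, err := p.Digest(session, []byte("this is a string"))
if err != nil {
	panic(err)
}
fmt.Printf("%x\n", hash)
```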
diff --git a/vendor/github.com/miekg/pkcs11/pkcs11.go b/vendor/github.com/miekg/pkcs11/pkcs11.go
index e21d23b73..e1b5824ec 100644
--- a/vendor/github.com/miekg/pkcs11/pkcs11.go
+++ b/vendor/github.com/miekg/pkcs11/pkcs11.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:generate go run const_generate.go
+
// Package pkcs11 is a wrapper around the PKCS#11 cryptographic library.
package pkcs11
@@ -14,7 +16,7 @@ package pkcs11
#cgo windows CFLAGS: -DPACKED_STRUCTURES
#cgo linux LDFLAGS: -ldl
#cgo darwin LDFLAGS: -ldl
-#cgo openbsd LDFLAGS: -ldl
+#cgo openbsd LDFLAGS:
#cgo freebsd LDFLAGS: -ldl
#include <stdlib.h>
@@ -770,9 +772,10 @@ static inline CK_VOID_PTR getAttributePval(CK_ATTRIBUTE_PTR a)
*/
import "C"
-import "strings"
-
-import "unsafe"
+import (
+ "strings"
+ "unsafe"
+)
// Ctx contains the current pkcs11 context.
type Ctx struct {
diff --git a/vendor/github.com/miekg/pkcs11/release.go b/vendor/github.com/miekg/pkcs11/release.go
index 4380f374d..d8b99f147 100644
--- a/vendor/github.com/miekg/pkcs11/release.go
+++ b/vendor/github.com/miekg/pkcs11/release.go
@@ -1,3 +1,4 @@
+//go:build release
// +build release
package pkcs11
@@ -5,7 +6,7 @@ package pkcs11
import "fmt"
// Release is current version of the pkcs11 library.
-var Release = R{1, 0, 3}
+var Release = R{1, 1, 1}
// R holds the version of this library.
type R struct {
diff --git a/vendor/github.com/miekg/pkcs11/types.go b/vendor/github.com/miekg/pkcs11/types.go
index 970db9061..60eadcb71 100644
--- a/vendor/github.com/miekg/pkcs11/types.go
+++ b/vendor/github.com/miekg/pkcs11/types.go
@@ -182,8 +182,20 @@ func NewAttribute(typ uint, x interface{}) *Attribute {
}
case int:
a.Value = uintToBytes(uint64(v))
+ case int16:
+ a.Value = uintToBytes(uint64(v))
+ case int32:
+ a.Value = uintToBytes(uint64(v))
+ case int64:
+ a.Value = uintToBytes(uint64(v))
case uint:
a.Value = uintToBytes(uint64(v))
+ case uint16:
+ a.Value = uintToBytes(uint64(v))
+ case uint32:
+ a.Value = uintToBytes(uint64(v))
+ case uint64:
+ a.Value = uintToBytes(uint64(v))
case string:
a.Value = []byte(v)
case []byte:
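
With the added cases, NewAttribute now accepts the fixed-width integer types directly instead of only int and uint, so values decoded from external data no longer need manual widening. A hedged sketch of building a search template; p and session come from an existing context, as in the README skeleton:

```go
class := uint32(pkcs11.CKO_PRIVATE_KEY) // e.g. decoded from external data
keyType := uint32(pkcs11.CKK_RSA)

template := []*pkcs11.Attribute{
	pkcs11.NewAttribute(pkcs11.CKA_CLASS, class),      // uint32 now accepted directly
	pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, keyType), // previously needed uint/int
	pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
}
if err := p.FindObjectsInit(session, template); err != nil {
	panic(err)
}
objs, _, err := p.FindObjects(session, 10)
if err != nil {
	panic(err)
}
_ = p.FindObjectsFinal(session)
fmt.Println("found", len(objs), "private keys")
```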
diff --git a/vendor/github.com/miekg/pkcs11/const.go b/vendor/github.com/miekg/pkcs11/zconst.go
index 408856146..41df5cfcf 100644
--- a/vendor/github.com/miekg/pkcs11/const.go
+++ b/vendor/github.com/miekg/pkcs11/zconst.go
@@ -2,48 +2,18 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package pkcs11
-
-const (
- CKU_SO uint = 0
- CKU_USER uint = 1
- CKU_CONTEXT_SPECIFIC uint = 2
-)
-
-const (
- CKO_DATA uint = 0x00000000
- CKO_CERTIFICATE uint = 0x00000001
- CKO_PUBLIC_KEY uint = 0x00000002
- CKO_PRIVATE_KEY uint = 0x00000003
- CKO_SECRET_KEY uint = 0x00000004
- CKO_HW_FEATURE uint = 0x00000005
- CKO_DOMAIN_PARAMETERS uint = 0x00000006
- CKO_MECHANISM uint = 0x00000007
- CKO_OTP_KEY uint = 0x00000008
- CKO_VENDOR_DEFINED uint = 0x80000000
-)
-
-const (
- CKG_MGF1_SHA1 uint = 0x00000001
- CKG_MGF1_SHA224 uint = 0x00000005
- CKG_MGF1_SHA256 uint = 0x00000002
- CKG_MGF1_SHA384 uint = 0x00000003
- CKG_MGF1_SHA512 uint = 0x00000004
- CKG_MGF1_SHA3_224 uint = 0x00000006
- CKG_MGF1_SHA3_256 uint = 0x00000007
- CKG_MGF1_SHA3_384 uint = 0x00000008
- CKG_MGF1_SHA3_512 uint = 0x00000009
-)
-
-const (
- CKZ_DATA_SPECIFIED uint = 0x00000001
-)
+// Code generated by "go run const_generate.go"; DO NOT EDIT.
-// Generated with: awk '/#define CK[AFKMRC]/{ print $2 " = " $3 }' pkcs11t.h | sed -e 's/UL$//g' -e 's/UL)$/)/g'
+package pkcs11
-// All the flag (CKF_), attribute (CKA_), error code (CKR_), key type (CKK_), certificate type (CKC_) and
-// mechanism (CKM_) constants as defined in PKCS#11.
const (
+ CK_TRUE = 1
+ CK_FALSE = 0
+ CK_UNAVAILABLE_INFORMATION = ^uint(0)
+ CK_EFFECTIVELY_INFINITE = 0
+ CK_INVALID_HANDLE = 0
+ CKN_SURRENDER = 0
+ CKN_OTP_CHANGED = 1
CKF_TOKEN_PRESENT = 0x00000001
CKF_REMOVABLE_DEVICE = 0x00000002
CKF_HW_SLOT = 0x00000004
@@ -66,12 +36,34 @@ const (
CKF_SO_PIN_LOCKED = 0x00400000
CKF_SO_PIN_TO_BE_CHANGED = 0x00800000
CKF_ERROR_STATE = 0x01000000
+ CKU_SO = 0
+ CKU_USER = 1
+ CKU_CONTEXT_SPECIFIC = 2
+ CKS_RO_PUBLIC_SESSION = 0
+ CKS_RO_USER_FUNCTIONS = 1
+ CKS_RW_PUBLIC_SESSION = 2
+ CKS_RW_USER_FUNCTIONS = 3
+ CKS_RW_SO_FUNCTIONS = 4
CKF_RW_SESSION = 0x00000002
CKF_SERIAL_SESSION = 0x00000004
+ CKO_DATA = 0x00000000
+ CKO_CERTIFICATE = 0x00000001
+ CKO_PUBLIC_KEY = 0x00000002
+ CKO_PRIVATE_KEY = 0x00000003
+ CKO_SECRET_KEY = 0x00000004
+ CKO_HW_FEATURE = 0x00000005
+ CKO_DOMAIN_PARAMETERS = 0x00000006
+ CKO_MECHANISM = 0x00000007
+ CKO_OTP_KEY = 0x00000008
+ CKO_VENDOR_DEFINED = 0x80000000
+ CKH_MONOTONIC_COUNTER = 0x00000001
+ CKH_CLOCK = 0x00000002
+ CKH_USER_INTERFACE = 0x00000003
+ CKH_VENDOR_DEFINED = 0x80000000
CKK_RSA = 0x00000000
CKK_DSA = 0x00000001
CKK_DH = 0x00000002
- CKK_ECDSA = 0x00000003
+ CKK_ECDSA = 0x00000003 // Deprecated
CKK_EC = 0x00000003
CKK_X9_42_DH = 0x00000004
CKK_KEA = 0x00000005
@@ -83,7 +75,7 @@ const (
CKK_DES3 = 0x00000015
CKK_CAST = 0x00000016
CKK_CAST3 = 0x00000017
- CKK_CAST5 = 0x00000018
+ CKK_CAST5 = 0x00000018 // Deprecated
CKK_CAST128 = 0x00000018
CKK_RC5 = 0x00000019
CKK_IDEA = 0x0000001A
@@ -99,14 +91,14 @@ const (
CKK_ACTI = 0x00000024
CKK_CAMELLIA = 0x00000025
CKK_ARIA = 0x00000026
- CKK_SHA512_224_HMAC = 0x00000027
- CKK_SHA512_256_HMAC = 0x00000028
- CKK_SHA512_T_HMAC = 0x00000029
+ CKK_MD5_HMAC = 0x00000027
CKK_SHA_1_HMAC = 0x00000028
- CKK_SHA224_HMAC = 0x0000002E
+ CKK_RIPEMD128_HMAC = 0x00000029
+ CKK_RIPEMD160_HMAC = 0x0000002A
CKK_SHA256_HMAC = 0x0000002B
CKK_SHA384_HMAC = 0x0000002C
CKK_SHA512_HMAC = 0x0000002D
+ CKK_SHA224_HMAC = 0x0000002E
CKK_SEED = 0x0000002F
CKK_GOSTR3410 = 0x00000030
CKK_GOSTR3411 = 0x00000031
@@ -116,11 +108,26 @@ const (
CKK_SHA3_384_HMAC = 0x00000035
CKK_SHA3_512_HMAC = 0x00000036
CKK_VENDOR_DEFINED = 0x80000000
+ CK_CERTIFICATE_CATEGORY_UNSPECIFIED = 0
+ CK_CERTIFICATE_CATEGORY_TOKEN_USER = 1
+ CK_CERTIFICATE_CATEGORY_AUTHORITY = 2
+ CK_CERTIFICATE_CATEGORY_OTHER_ENTITY = 3
+ CK_SECURITY_DOMAIN_UNSPECIFIED = 0
+ CK_SECURITY_DOMAIN_MANUFACTURER = 1
+ CK_SECURITY_DOMAIN_OPERATOR = 2
+ CK_SECURITY_DOMAIN_THIRD_PARTY = 3
CKC_X_509 = 0x00000000
CKC_X_509_ATTR_CERT = 0x00000001
CKC_WTLS = 0x00000002
CKC_VENDOR_DEFINED = 0x80000000
CKF_ARRAY_ATTRIBUTE = 0x40000000
+ CK_OTP_FORMAT_DECIMAL = 0
+ CK_OTP_FORMAT_HEXADECIMAL = 1
+ CK_OTP_FORMAT_ALPHANUMERIC = 2
+ CK_OTP_FORMAT_BINARY = 3
+ CK_OTP_PARAM_IGNORED = 0
+ CK_OTP_PARAM_OPTIONAL = 1
+ CK_OTP_PARAM_MANDATORY = 2
CKA_CLASS = 0x00000000
CKA_TOKEN = 0x00000001
CKA_PRIVATE = 0x00000002
@@ -183,15 +190,16 @@ const (
CKA_MODIFIABLE = 0x00000170
CKA_COPYABLE = 0x00000171
CKA_DESTROYABLE = 0x00000172
- CKA_ECDSA_PARAMS = 0x00000180
+ CKA_ECDSA_PARAMS = 0x00000180 // Deprecated
CKA_EC_PARAMS = 0x00000180
CKA_EC_POINT = 0x00000181
- CKA_SECONDARY_AUTH = 0x00000200
- CKA_AUTH_PIN_FLAGS = 0x00000201
+ CKA_SECONDARY_AUTH = 0x00000200 // Deprecated
+ CKA_AUTH_PIN_FLAGS = 0x00000201 // Deprecated
CKA_ALWAYS_AUTHENTICATE = 0x00000202
CKA_WRAP_WITH_TRUSTED = 0x00000210
- CKA_WRAP_TEMPLATE = CKF_ARRAY_ATTRIBUTE | 0x00000211
- CKA_UNWRAP_TEMPLATE = CKF_ARRAY_ATTRIBUTE | 0x00000212
+ CKA_WRAP_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000211)
+ CKA_UNWRAP_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000212)
+ CKA_DERIVE_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000213)
CKA_OTP_FORMAT = 0x00000220
CKA_OTP_LENGTH = 0x00000221
CKA_OTP_TIME_INTERVAL = 0x00000222
@@ -226,7 +234,7 @@ const (
CKA_REQUIRED_CMS_ATTRIBUTES = 0x00000501
CKA_DEFAULT_CMS_ATTRIBUTES = 0x00000502
CKA_SUPPORTED_CMS_ATTRIBUTES = 0x00000503
- CKA_ALLOWED_MECHANISMS = CKF_ARRAY_ATTRIBUTE | 0x00000600
+ CKA_ALLOWED_MECHANISMS = (CKF_ARRAY_ATTRIBUTE | 0x00000600)
CKA_VENDOR_DEFINED = 0x80000000
CKM_RSA_PKCS_KEY_PAIR_GEN = 0x00000000
CKM_RSA_PKCS = 0x00000001
@@ -246,11 +254,10 @@ const (
CKM_DSA_KEY_PAIR_GEN = 0x00000010
CKM_DSA = 0x00000011
CKM_DSA_SHA1 = 0x00000012
- CKM_DSA_FIPS_G_GEN = 0x00000013
- CKM_DSA_SHA224 = 0x00000014
- CKM_DSA_SHA256 = 0x00000015
- CKM_DSA_SHA384 = 0x00000016
- CKM_DSA_SHA512 = 0x00000017
+ CKM_DSA_SHA224 = 0x00000013
+ CKM_DSA_SHA256 = 0x00000014
+ CKM_DSA_SHA384 = 0x00000015
+ CKM_DSA_SHA512 = 0x00000016
CKM_DSA_SHA3_224 = 0x00000018
CKM_DSA_SHA3_256 = 0x00000019
CKM_DSA_SHA3_384 = 0x0000001A
@@ -387,13 +394,13 @@ const (
CKM_CAST128_KEY_GEN = 0x00000320
CKM_CAST5_ECB = 0x00000321
CKM_CAST128_ECB = 0x00000321
- CKM_CAST5_CBC = 0x00000322
+ CKM_CAST5_CBC = 0x00000322 // Deprecated
CKM_CAST128_CBC = 0x00000322
- CKM_CAST5_MAC = 0x00000323
+ CKM_CAST5_MAC = 0x00000323 // Deprecated
CKM_CAST128_MAC = 0x00000323
- CKM_CAST5_MAC_GENERAL = 0x00000324
+ CKM_CAST5_MAC_GENERAL = 0x00000324 // Deprecated
CKM_CAST128_MAC_GENERAL = 0x00000324
- CKM_CAST5_CBC_PAD = 0x00000325
+ CKM_CAST5_CBC_PAD = 0x00000325 // Deprecated
CKM_CAST128_CBC_PAD = 0x00000325
CKM_RC5_KEY_GEN = 0x00000330
CKM_RC5_ECB = 0x00000331
@@ -441,9 +448,9 @@ const (
CKM_PBE_MD5_DES_CBC = 0x000003A1
CKM_PBE_MD5_CAST_CBC = 0x000003A2
CKM_PBE_MD5_CAST3_CBC = 0x000003A3
- CKM_PBE_MD5_CAST5_CBC = 0x000003A4
+ CKM_PBE_MD5_CAST5_CBC = 0x000003A4 // Deprecated
CKM_PBE_MD5_CAST128_CBC = 0x000003A4
- CKM_PBE_SHA1_CAST5_CBC = 0x000003A5
+ CKM_PBE_SHA1_CAST5_CBC = 0x000003A5 // Deprecated
CKM_PBE_SHA1_CAST128_CBC = 0x000003A5
CKM_PBE_SHA1_RC4_128 = 0x000003A6
CKM_PBE_SHA1_RC4_40 = 0x000003A7
@@ -522,7 +529,7 @@ const (
CKM_BATON_COUNTER = 0x00001034
CKM_BATON_SHUFFLE = 0x00001035
CKM_BATON_WRAP = 0x00001036
- CKM_ECDSA_KEY_PAIR_GEN = 0x00001040
+ CKM_ECDSA_KEY_PAIR_GEN = 0x00001040 // Deprecated
CKM_EC_KEY_PAIR_GEN = 0x00001040
CKM_ECDSA = 0x00001041
CKM_ECDSA_SHA1 = 0x00001042
@@ -551,9 +558,9 @@ const (
CKM_AES_CTR = 0x00001086
CKM_AES_GCM = 0x00001087
CKM_AES_CCM = 0x00001088
- CKM_AES_CMAC_GENERAL = 0x00001089
+ CKM_AES_CTS = 0x00001089
CKM_AES_CMAC = 0x0000108A
- CKM_AES_CTS = 0x0000108B
+ CKM_AES_CMAC_GENERAL = 0x0000108B
CKM_AES_XCBC_MAC = 0x0000108C
CKM_AES_XCBC_MAC_96 = 0x0000108D
CKM_AES_GMAC = 0x0000108E
@@ -704,33 +711,56 @@ const (
CKR_MUTEX_NOT_LOCKED = 0x000001A1
CKR_NEW_PIN_MODE = 0x000001B0
CKR_NEXT_OTP = 0x000001B1
- CKR_EXCEEDED_MAX_ITERATIONS = 0x000001C0
- CKR_FIPS_SELF_TEST_FAILED = 0x000001C1
- CKR_LIBRARY_LOAD_FAILED = 0x000001C2
- CKR_PIN_TOO_WEAK = 0x000001C3
- CKR_PUBLIC_KEY_INVALID = 0x000001C4
+ CKR_EXCEEDED_MAX_ITERATIONS = 0x000001B5
+ CKR_FIPS_SELF_TEST_FAILED = 0x000001B6
+ CKR_LIBRARY_LOAD_FAILED = 0x000001B7
+ CKR_PIN_TOO_WEAK = 0x000001B8
+ CKR_PUBLIC_KEY_INVALID = 0x000001B9
CKR_FUNCTION_REJECTED = 0x00000200
CKR_VENDOR_DEFINED = 0x80000000
CKF_LIBRARY_CANT_CREATE_OS_THREADS = 0x00000001
CKF_OS_LOCKING_OK = 0x00000002
CKF_DONT_BLOCK = 1
+ CKG_MGF1_SHA1 = 0x00000001
+ CKG_MGF1_SHA256 = 0x00000002
+ CKG_MGF1_SHA384 = 0x00000003
+ CKG_MGF1_SHA512 = 0x00000004
+ CKG_MGF1_SHA224 = 0x00000005
+ CKZ_DATA_SPECIFIED = 0x00000001
+ CKD_NULL = 0x00000001
+ CKD_SHA1_KDF = 0x00000002
+ CKD_SHA1_KDF_ASN1 = 0x00000003
+ CKD_SHA1_KDF_CONCATENATE = 0x00000004
+ CKD_SHA224_KDF = 0x00000005
+ CKD_SHA256_KDF = 0x00000006
+ CKD_SHA384_KDF = 0x00000007
+ CKD_SHA512_KDF = 0x00000008
+ CKD_CPDIVERSIFY_KDF = 0x00000009
+ CKD_SHA3_224_KDF = 0x0000000A
+ CKD_SHA3_256_KDF = 0x0000000B
+ CKD_SHA3_384_KDF = 0x0000000C
+ CKD_SHA3_512_KDF = 0x0000000D
+ CKP_PKCS5_PBKD2_HMAC_SHA1 = 0x00000001
+ CKP_PKCS5_PBKD2_HMAC_GOSTR3411 = 0x00000002
+ CKP_PKCS5_PBKD2_HMAC_SHA224 = 0x00000003
+ CKP_PKCS5_PBKD2_HMAC_SHA256 = 0x00000004
+ CKP_PKCS5_PBKD2_HMAC_SHA384 = 0x00000005
+ CKP_PKCS5_PBKD2_HMAC_SHA512 = 0x00000006
+ CKP_PKCS5_PBKD2_HMAC_SHA512_224 = 0x00000007
+ CKP_PKCS5_PBKD2_HMAC_SHA512_256 = 0x00000008
+ CKZ_SALT_SPECIFIED = 0x00000001
+ CK_OTP_VALUE = 0
+ CK_OTP_PIN = 1
+ CK_OTP_CHALLENGE = 2
+ CK_OTP_TIME = 3
+ CK_OTP_COUNTER = 4
+ CK_OTP_FLAGS = 5
+ CK_OTP_OUTPUT_LENGTH = 6
+ CK_OTP_OUTPUT_FORMAT = 7
CKF_NEXT_OTP = 0x00000001
CKF_EXCLUDE_TIME = 0x00000002
CKF_EXCLUDE_COUNTER = 0x00000004
CKF_EXCLUDE_CHALLENGE = 0x00000008
CKF_EXCLUDE_PIN = 0x00000010
CKF_USER_FRIENDLY_OTP = 0x00000020
- CKD_NULL = 0x00000001
- CKD_SHA1_KDF = 0x00000002
-)
-
-// Special return values defined in PKCS#11 v2.40 section 3.2.
-const (
- // CK_EFFECTIVELY_INFINITE may be returned in the CK_TOKEN_INFO fields ulMaxSessionCount and ulMaxRwSessionCount.
- // It indicates there is no practical limit on the number of sessions.
- CK_EFFECTIVELY_INFINITE = 0
-
- // CK_UNAVAILABLE_INFORMATION may be returned for several fields within CK_TOKEN_INFO. It indicates
- // the token is unable or unwilling to provide the requested information.
- CK_UNAVAILABLE_INFORMATION = ^uint(0)
)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
index 83c49b66a..861b4d21c 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -49,7 +49,10 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp
// http.RoundTripper to observe the request result with the provided CounterVec.
// The CounterVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
-// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
+// panics otherwise. For the "method" label a predefined default label value set
+// is used to filter given values. Values besides predefined values will count
+// as `unknown` method. `WithExtraMethods` can be used to add more
+// methods to the set. Partitioning of the CounterVec happens by HTTP status code
// and/or HTTP method if the respective instance label names are present in the
// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
//
@@ -57,13 +60,18 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp
// is not incremented.
//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
-func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
+func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
+ rtOpts := &option{}
+ for _, o := range opts {
+ o(rtOpts)
+ }
+
code, method := checkLabels(counter)
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
resp, err := next.RoundTrip(r)
if err == nil {
- counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
+ counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc()
}
return resp, err
})
@@ -73,7 +81,10 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
// http.RoundTripper to observe the request duration with the provided
// ObserverVec. The ObserverVec must have zero, one, or two non-const
// non-curried labels. For those, the only allowed label names are "code" and
-// "method". The function panics otherwise. The Observe method of the Observer
+// "method". The function panics otherwise. For the "method" label a predefined
+// default label value set is used to filter given values. Values besides
+// predefined values will count as `unknown` method. `WithExtraMethods`
+// can be used to add more methods to the set. The Observe method of the Observer
// in the ObserverVec is called with the request duration in
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
@@ -85,14 +96,19 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
-func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
+func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
+ rtOpts := &option{}
+ for _, o := range opts {
+ o(rtOpts)
+ }
+
code, method := checkLabels(obs)
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
start := time.Now()
resp, err := next.RoundTrip(r)
if err == nil {
- obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
+ obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Observe(time.Since(start).Seconds())
}
return resp, err
})
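
Both round-tripper wrappers now take variadic Option values, so non-default HTTP verbs can be whitelisted instead of collapsing into the "unknown" method label. A hedged sketch of chaining them around http.DefaultTransport; the metric names and the PROPFIND verb are illustrative:

```go
import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
	clientRequests = prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "client_api_requests_total", Help: "Client requests by code and method."},
		[]string{"code", "method"},
	)
	clientLatency = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{Name: "client_request_duration_seconds", Help: "Client request latency."},
		[]string{"method"},
	)
)

func newInstrumentedClient() *http.Client {
	prometheus.MustRegister(clientRequests, clientLatency)
	rt := promhttp.InstrumentRoundTripperCounter(clientRequests,
		promhttp.InstrumentRoundTripperDuration(clientLatency, http.DefaultTransport,
			promhttp.WithExtraMethods("PROPFIND")),
		promhttp.WithExtraMethods("PROPFIND"), // pass to each wrapper that labels by method
	)
	return &http.Client{Transport: rt}
}
```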
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
index ab037db86..a23f0edc6 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -45,7 +45,10 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl
// http.Handler to observe the request duration with the provided ObserverVec.
// The ObserverVec must have valid metric and label names and must have zero,
// one, or two non-const non-curried labels. For those, the only allowed label
-// names are "code" and "method". The function panics otherwise. The Observe
+// names are "code" and "method". The function panics otherwise. For the "method"
+// label a predefined default label value set is used to filter given values.
+// Values besides predefined values will count as `unknown` method.
+// `WithExtraMethods` can be used to add more methods to the set. The Observe
// method of the Observer in the ObserverVec is called with the request duration
// in seconds. Partitioning happens by HTTP status code and/or HTTP method if
// the respective instance label names are present in the ObserverVec. For
@@ -58,7 +61,12 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
-func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
+ mwOpts := &option{}
+ for _, o := range opts {
+ o(mwOpts)
+ }
+
code, method := checkLabels(obs)
if code {
@@ -67,14 +75,14 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) ht
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
+ obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
next.ServeHTTP(w, r)
- obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
+ obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
})
}
@@ -82,7 +90,10 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) ht
// to observe the request result with the provided CounterVec. The CounterVec
// must have valid metric and label names and must have zero, one, or two
// non-const non-curried labels. For those, the only allowed label names are
-// "code" and "method". The function panics otherwise. Partitioning of the
+// "code" and "method". The function panics otherwise. For the "method"
+// label a predefined default label value set is used to filter given values.
+// Values besides predefined values will count as `unknown` method.
+// `WithExtraMethods` can be used to add more methods to the set. Partitioning of the
// CounterVec happens by HTTP status code and/or HTTP method if the respective
// instance label names are present in the CounterVec. For unpartitioned
// counting, use a CounterVec with zero labels.
@@ -92,20 +103,25 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) ht
// If the wrapped Handler panics, the Counter is not incremented.
//
// See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
+func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc {
+ mwOpts := &option{}
+ for _, o := range opts {
+ o(mwOpts)
+ }
+
code, method := checkLabels(counter)
if code {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- counter.With(labels(code, method, r.Method, d.Status())).Inc()
+ counter.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Inc()
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
- counter.With(labels(code, method, r.Method, 0)).Inc()
+ counter.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Inc()
})
}
@@ -114,7 +130,10 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler)
// until the response headers are written. The ObserverVec must have valid
// metric and label names and must have zero, one, or two non-const non-curried
// labels. For those, the only allowed label names are "code" and "method". The
-// function panics otherwise. The Observe method of the Observer in the
+// function panics otherwise. For the "method" label a predefined default label
+// value set is used to filter given values. Values besides predefined values
+// will count as `unknown` method. `WithExtraMethods` can be used to add more
+// methods to the set. The Observe method of the Observer in the
// ObserverVec is called with the request duration in seconds. Partitioning
// happens by HTTP status code and/or HTTP method if the respective instance
// label names are present in the ObserverVec. For unpartitioned observations,
@@ -128,13 +147,18 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler)
// if used with Go1.9+.
//
// See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
+ mwOpts := &option{}
+ for _, o := range opts {
+ o(mwOpts)
+ }
+
code, method := checkLabels(obs)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, func(status int) {
- obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
+ obs.With(labels(code, method, r.Method, status, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
})
next.ServeHTTP(d, r)
})
@@ -144,8 +168,11 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
// http.Handler to observe the request size with the provided ObserverVec. The
// ObserverVec must have valid metric and label names and must have zero, one,
// or two non-const non-curried labels. For those, the only allowed label names
-// are "code" and "method". The function panics otherwise. The Observe method of
-// the Observer in the ObserverVec is called with the request size in
+// are "code" and "method". The function panics otherwise. For the "method"
+// label a predefined default label value set is used to filter given values.
+// Values besides predefined values will count as `unknown` method.
+// `WithExtraMethods` can be used to add more methods to the set. The Observe
+// method of the Observer in the ObserverVec is called with the request size in
// bytes. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
@@ -156,7 +183,12 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
// If the wrapped Handler panics, no values are reported.
//
// See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
+ mwOpts := &option{}
+ for _, o := range opts {
+ o(mwOpts)
+ }
+
code, method := checkLabels(obs)
if code {
@@ -164,14 +196,14 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler)
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
size := computeApproximateRequestSize(r)
- obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
+ obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(size))
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
size := computeApproximateRequestSize(r)
- obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
+ obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(float64(size))
})
}
@@ -179,8 +211,11 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler)
// http.Handler to observe the response size with the provided ObserverVec. The
// ObserverVec must have valid metric and label names and must have zero, one,
// or two non-const non-curried labels. For those, the only allowed label names
-// are "code" and "method". The function panics otherwise. The Observe method of
-// the Observer in the ObserverVec is called with the response size in
+// are "code" and "method". The function panics otherwise. For the "method"
+// label a predefined default label value set is used to filter given values.
+// Values besides predefined values will count as `unknown` method.
+// `WithExtraMethods` can be used to add more methods to the set. The Observe
+// method of the Observer in the ObserverVec is called with the response size in
// bytes. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
@@ -191,12 +226,18 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler)
// If the wrapped Handler panics, no values are reported.
//
// See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
+func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler {
+ mwOpts := &option{}
+ for _, o := range opts {
+ o(mwOpts)
+ }
+
code, method := checkLabels(obs)
+
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
+ obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(d.Written()))
})
}
@@ -290,7 +331,7 @@ func isLabelCurried(c prometheus.Collector, label string) bool {
// unnecessary allocations on each request.
var emptyLabels = prometheus.Labels{}
-func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
+func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
if !(code || method) {
return emptyLabels
}
@@ -300,7 +341,7 @@ func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
labels["code"] = sanitizeCode(status)
}
if method {
- labels["method"] = sanitizeMethod(reqMethod)
+ labels["method"] = sanitizeMethod(reqMethod, extraMethods...)
}
return labels
@@ -330,7 +371,12 @@ func computeApproximateRequestSize(r *http.Request) int {
return s
}
-func sanitizeMethod(m string) string {
+// If the request method is one of the known HTTP methods, its lower-cased form
+// is returned. Otherwise, "unknown" is returned. The known method list can be
+// extended as needed by using the extraMethods parameter.
+func sanitizeMethod(m string, extraMethods ...string) string {
+ // See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for
+ // the methods chosen as default.
switch m {
case "GET", "get":
return "get"
@@ -348,15 +394,25 @@ func sanitizeMethod(m string) string {
return "options"
case "NOTIFY", "notify":
return "notify"
+ case "TRACE", "trace":
+ return "trace"
+ case "PATCH", "patch":
+ return "patch"
default:
- return strings.ToLower(m)
+ for _, method := range extraMethods {
+ if strings.EqualFold(m, method) {
+ return strings.ToLower(m)
+ }
+ }
+ return "unknown"
}
}
// If the wrapped http.Handler has not set a status code, i.e. the value is
-// currently 0, santizeCode will return 200, for consistency with behavior in
+// currently 0, sanitizeCode will return 200, for consistency with behavior in
// the stdlib.
func sanitizeCode(s int) string {
+ // See for accepted codes https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml
switch s {
case 100:
return "100"
@@ -453,6 +509,9 @@ func sanitizeCode(s int) string {
return "511"
default:
- return strconv.Itoa(s)
+ if s >= 100 && s <= 599 {
+ return strconv.Itoa(s)
+ }
+ return "unknown"
}
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
new file mode 100644
index 000000000..35e41bd1e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
@@ -0,0 +1,31 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+// Option is used to configure a middleware or round tripper.
+type Option func(*option)
+
+type option struct {
+ extraMethods []string
+}
+
+// WithExtraMethods adds additional HTTP methods to the list of allowed methods.
+// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list.
+//
+// See the example for ExampleInstrumentHandlerWithExtraMethods for example usage.
+func WithExtraMethods(methods ...string) Option {
+ return func(o *option) {
+ o.extraMethods = methods
+ }
+}
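
The same Option flows through the server-side InstrumentHandler* wrappers; a hedged sketch using the same imports as the client sketch above, with the metric name and the REPORT verb being illustrative:

```go
var apiRequests = prometheus.NewCounterVec(
	prometheus.CounterOpts{Name: "api_requests_total", Help: "HTTP requests by code and method."},
	[]string{"code", "method"},
)

// instrument wraps a handler so REPORT is counted as "report" rather than "unknown".
func instrument(next http.Handler) http.Handler {
	prometheus.MustRegister(apiRequests)
	return promhttp.InstrumentHandlerCounter(apiRequests, next,
		promhttp.WithExtraMethods("REPORT"),
	)
}
```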
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
index 41649d267..3bb22a971 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
@@ -3,6 +3,7 @@ package assert
import (
"fmt"
"reflect"
+ "time"
)
type CompareType int
@@ -30,6 +31,8 @@ var (
float64Type = reflect.TypeOf(float64(1))
stringType = reflect.TypeOf("")
+
+ timeType = reflect.TypeOf(time.Time{})
)
func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
@@ -299,6 +302,27 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
return compareLess, true
}
}
+ // Check for known struct types we can check for compare results.
+ case reflect.Struct:
+ {
+ // All structs enter here. We're not interested in most types.
+ if !canConvert(obj1Value, timeType) {
+ break
+ }
+
+ // time.Time can be compared!
+ timeObj1, ok := obj1.(time.Time)
+ if !ok {
+ timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time)
+ }
+
+ timeObj2, ok := obj2.(time.Time)
+ if !ok {
+ timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time)
+ }
+
+ return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
+ }
}
return compareEqual, false
@@ -310,7 +334,10 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
// assert.Greater(t, float64(2), float64(1))
// assert.Greater(t, "b", "a")
func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}
// GreaterOrEqual asserts that the first element is greater than or equal to the second
@@ -320,7 +347,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface
// assert.GreaterOrEqual(t, "b", "a")
// assert.GreaterOrEqual(t, "b", "b")
func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}
// Less asserts that the first element is less than the second
@@ -329,7 +359,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in
// assert.Less(t, float64(1), float64(2))
// assert.Less(t, "a", "b")
func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}
// LessOrEqual asserts that the first element is less than or equal to the second
@@ -339,7 +372,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{})
// assert.LessOrEqual(t, "a", "b")
// assert.LessOrEqual(t, "b", "b")
func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
- return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}
// Positive asserts that the specified element is positive
@@ -347,8 +383,11 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
// assert.Positive(t, 1)
// assert.Positive(t, 1.23)
func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
zero := reflect.Zero(reflect.TypeOf(e))
- return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs)
+ return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
}
// Negative asserts that the specified element is negative
@@ -356,8 +395,11 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
// assert.Negative(t, -1)
// assert.Negative(t, -1.23)
func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
zero := reflect.Zero(reflect.TypeOf(e))
- return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs)
+ return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...)
}
func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
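
The new reflect.Struct branch means the ordered assertions now accept time.Time values (and types convertible to it), comparing them via UnixNano. A hedged test sketch:

```go
package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestDeadlineOrdering(t *testing.T) {
	start := time.Now()
	deadline := start.Add(30 * time.Second)

	assert.Greater(t, deadline, start) // compared via UnixNano per the Struct case above
	assert.LessOrEqual(t, start, deadline)
}
```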
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
new file mode 100644
index 000000000..df22c47fc
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
@@ -0,0 +1,16 @@
+//go:build go1.17
+// +build go1.17
+
+// TODO: once support for Go 1.16 is dropped, this file can be
+// merged/removed with assertion_compare_go1.17_test.go and
+// assertion_compare_legacy.go
+
+package assert
+
+import "reflect"
+
+// Wrapper around reflect.Value.CanConvert, for compatibility
+// reasons.
+func canConvert(value reflect.Value, to reflect.Type) bool {
+ return value.CanConvert(to)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
new file mode 100644
index 000000000..1701af2a3
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
@@ -0,0 +1,16 @@
+//go:build !go1.17
+// +build !go1.17
+
+// TODO: once support for Go 1.16 is dropped, this file can be
+// merged/removed with assertion_compare_go1.17_test.go and
+// assertion_compare_can_convert.go
+
+package assert
+
+import "reflect"
+
+// Older versions of Go do not have the reflect.Value.CanConvert
+// method.
+func canConvert(value reflect.Value, to reflect.Type) bool {
+ return false
+}
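
The two new files above pick a canConvert implementation with Go build constraints: on go1.17 the wrapper calls reflect.Value.CanConvert, while the legacy build always returns false so callers skip the conversion path. A sketch of the same pattern under stated assumptions; the package name compat and the helper hasCanConvert are illustrative, not part of testify:

// file: canconvert_go117.go (hypothetical)
//go:build go1.17
// +build go1.17

package compat

import "reflect"

// hasCanConvert uses the reflect.Value.CanConvert method available in
// Go 1.17 and newer.
func hasCanConvert(value reflect.Value, to reflect.Type) bool {
	return value.CanConvert(to)
}

// file: canconvert_legacy.go (hypothetical)
//go:build !go1.17
// +build !go1.17

package compat

import "reflect"

// hasCanConvert conservatively reports false on older toolchains; callers
// fall back to their pre-1.17 comparison logic.
func hasCanConvert(value reflect.Value, to reflect.Type) bool {
	return false
}
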
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index 4dfd1229a..27e2420ed 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -123,6 +123,18 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int
return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
}
+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
+func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...)
+}
+
// ErrorIsf asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
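
ErrorContainsf above is the formatted variant of the new ErrorContains assertion. A hypothetical usage sketch (openConfig and the path are illustrative, not from the diff):

package example

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

// openConfig is a stand-in that always fails, purely for illustration.
func openConfig(path string) error {
	return fmt.Errorf("open %s: permission denied", path)
}

func TestOpenConfigError(t *testing.T) {
	path := "/etc/containers/containers.conf"
	err := openConfig(path)
	// Passes because the substring matches; on mismatch the extra args
	// are formatted into the failure message.
	assert.ErrorContainsf(t, err, "permission denied", "unexpected error for %s", path)
}
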
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index 25337a6f0..d9ea368d0 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -222,6 +222,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ..
return ErrorAsf(a.t, err, target, msg, args...)
}
+// ErrorContains asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// a.ErrorContains(err, expectedErrorSubString)
+func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorContains(a.t, theError, contains, msgAndArgs...)
+}
+
+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted")
+func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorContainsf(a.t, theError, contains, msg, args...)
+}
+
// ErrorIs asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go
index 1c3b47182..759448783 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_order.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go
@@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT
// assert.IsIncreasing(t, []float{1, 2})
// assert.IsIncreasing(t, []string{"a", "b"})
func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
+ return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}
// IsNonIncreasing asserts that the collection is not increasing
@@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo
// assert.IsNonIncreasing(t, []float{2, 1})
// assert.IsNonIncreasing(t, []string{"b", "a"})
func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
+ return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}
// IsDecreasing asserts that the collection is decreasing
@@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{})
// assert.IsDecreasing(t, []float{2, 1})
// assert.IsDecreasing(t, []string{"b", "a"})
func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
+ return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}
// IsNonDecreasing asserts that the collection is not decreasing
@@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo
// assert.IsNonDecreasing(t, []float{1, 2})
// assert.IsNonDecreasing(t, []string{"a", "b"})
func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
+ return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}
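
assertion_order.go receives the same variadic forwarding fix, so the ordering assertions also render custom messages correctly. A small sketch with illustrative data, not part of the vendored diff:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestOrderedCollections(t *testing.T) {
	latencies := []float64{1.2, 3.4, 5.6}
	// %v now expands properly in the failure message instead of showing
	// the wrapped msgAndArgs slice.
	assert.IsIncreasing(t, latencies, "latencies %v should grow", latencies)
	assert.IsNonDecreasing(t, []int{1, 1, 2})
	assert.IsDecreasing(t, []string{"c", "b", "a"})
}
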
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index bcac4401f..0357b2231 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -718,10 +718,14 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte
// return (false, false) if impossible.
// return (true, false) if element was not found.
// return (true, true) if element was found.
-func includeElement(list interface{}, element interface{}) (ok, found bool) {
+func containsElement(list interface{}, element interface{}) (ok, found bool) {
listValue := reflect.ValueOf(list)
- listKind := reflect.TypeOf(list).Kind()
+ listType := reflect.TypeOf(list)
+ if listType == nil {
+ return false, false
+ }
+ listKind := listType.Kind()
defer func() {
if e := recover(); e != nil {
ok = false
@@ -764,7 +768,7 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo
h.Helper()
}
- ok, found := includeElement(s, contains)
+ ok, found := containsElement(s, contains)
if !ok {
return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
}
@@ -787,7 +791,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{})
h.Helper()
}
- ok, found := includeElement(s, contains)
+ ok, found := containsElement(s, contains)
if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
}
@@ -831,7 +835,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
for i := 0; i < subsetValue.Len(); i++ {
element := subsetValue.Index(i).Interface()
- ok, found := includeElement(list, element)
+ ok, found := containsElement(list, element)
if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
}
@@ -852,7 +856,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
h.Helper()
}
if subset == nil {
- return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...)
+ return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
}
subsetValue := reflect.ValueOf(subset)
@@ -875,7 +879,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
for i := 0; i < subsetValue.Len(); i++ {
element := subsetValue.Index(i).Interface()
- ok, found := includeElement(list, element)
+ ok, found := containsElement(list, element)
if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
}
@@ -1000,27 +1004,21 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
type PanicTestFunc func()
// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
-func didPanic(f PanicTestFunc) (bool, interface{}, string) {
-
- didPanic := false
- var message interface{}
- var stack string
- func() {
-
- defer func() {
- if message = recover(); message != nil {
- didPanic = true
- stack = string(debug.Stack())
- }
- }()
-
- // call the target function
- f()
+func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string) {
+ didPanic = true
+ defer func() {
+ message = recover()
+ if didPanic {
+ stack = string(debug.Stack())
+ }
}()
- return didPanic, message, stack
+ // call the target function
+ f()
+ didPanic = false
+ return
}
// Panics asserts that the code inside the specified PanicTestFunc panics.
@@ -1161,11 +1159,15 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs
bf, bok := toFloat(actual)
if !aok || !bok {
- return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...)
+ return Fail(t, "Parameters must be numerical", msgAndArgs...)
+ }
+
+ if math.IsNaN(af) && math.IsNaN(bf) {
+ return true
}
if math.IsNaN(af) {
- return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...)
+ return Fail(t, "Expected must not be NaN", msgAndArgs...)
}
if math.IsNaN(bf) {
@@ -1188,7 +1190,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn
if expected == nil || actual == nil ||
reflect.TypeOf(actual).Kind() != reflect.Slice ||
reflect.TypeOf(expected).Kind() != reflect.Slice {
- return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+ return Fail(t, "Parameters must be slice", msgAndArgs...)
}
actualSlice := reflect.ValueOf(actual)
@@ -1250,8 +1252,12 @@ func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, m
func calcRelativeError(expected, actual interface{}) (float64, error) {
af, aok := toFloat(expected)
- if !aok {
- return 0, fmt.Errorf("expected value %q cannot be converted to float", expected)
+ bf, bok := toFloat(actual)
+ if !aok || !bok {
+ return 0, fmt.Errorf("Parameters must be numerical")
+ }
+ if math.IsNaN(af) && math.IsNaN(bf) {
+ return 0, nil
}
if math.IsNaN(af) {
return 0, errors.New("expected value must not be NaN")
@@ -1259,10 +1265,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) {
if af == 0 {
return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
}
- bf, bok := toFloat(actual)
- if !bok {
- return 0, fmt.Errorf("actual value %q cannot be converted to float", actual)
- }
if math.IsNaN(bf) {
return 0, errors.New("actual value must not be NaN")
}
@@ -1298,7 +1300,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
if expected == nil || actual == nil ||
reflect.TypeOf(actual).Kind() != reflect.Slice ||
reflect.TypeOf(expected).Kind() != reflect.Slice {
- return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+ return Fail(t, "Parameters must be slice", msgAndArgs...)
}
actualSlice := reflect.ValueOf(actual)
@@ -1375,6 +1377,27 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte
return true
}
+// ErrorContains asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// assert.ErrorContains(t, err, expectedErrorSubString)
+func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if !Error(t, theError, msgAndArgs...) {
+ return false
+ }
+
+ actual := theError.Error()
+ if !strings.Contains(actual, contains) {
+ return Fail(t, fmt.Sprintf("Error %#v does not contain %#v", actual, contains), msgAndArgs...)
+ }
+
+ return true
+}
+
// matchRegexp return true if a specified regexp matches a string.
func matchRegexp(rx interface{}, str interface{}) bool {
@@ -1588,12 +1611,17 @@ func diff(expected interface{}, actual interface{}) string {
}
var e, a string
- if et != reflect.TypeOf("") {
- e = spewConfig.Sdump(expected)
- a = spewConfig.Sdump(actual)
- } else {
+
+ switch et {
+ case reflect.TypeOf(""):
e = reflect.ValueOf(expected).String()
a = reflect.ValueOf(actual).String()
+ case reflect.TypeOf(time.Time{}):
+ e = spewConfigStringerEnabled.Sdump(expected)
+ a = spewConfigStringerEnabled.Sdump(actual)
+ default:
+ e = spewConfig.Sdump(expected)
+ a = spewConfig.Sdump(actual)
}
diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
@@ -1625,6 +1653,14 @@ var spewConfig = spew.ConfigState{
MaxDepth: 10,
}
+var spewConfigStringerEnabled = spew.ConfigState{
+ Indent: " ",
+ DisablePointerAddresses: true,
+ DisableCapacities: true,
+ SortKeys: true,
+ MaxDepth: 10,
+}
+
type tHelper interface {
Helper()
}
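
Among the assertions.go changes above, the most visible behavioural one is that InDelta (and, via calcRelativeError, InEpsilon) now treats two NaN values as equal rather than failing on the "Expected must not be NaN" branch. A hedged sketch:

package example

import (
	"math"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNaNComparisons(t *testing.T) {
	// Both sides NaN: passes after this change.
	assert.InDelta(t, math.NaN(), math.NaN(), 0.001)
	// calcRelativeError now returns (0, nil) for the NaN/NaN case, so the
	// epsilon check passes as well.
	assert.InEpsilon(t, math.NaN(), math.NaN(), 0.001)
	// A NaN on only one side still fails, e.g.:
	//   assert.InDelta(t, math.NaN(), 1.0, 0.001)
}
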
diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go
index 51820df2e..59c48277a 100644
--- a/vendor/github.com/stretchr/testify/require/require.go
+++ b/vendor/github.com/stretchr/testify/require/require.go
@@ -280,6 +280,36 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int
t.FailNow()
}
+// ErrorContains asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// assert.ErrorContains(t, err, expectedErrorSubString)
+func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.ErrorContains(t, theError, contains, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
+func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.ErrorContainsf(t, theError, contains, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// ErrorIs asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) {
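
require.ErrorContains and ErrorContainsf mirror the assert versions but call t.FailNow() on mismatch, so the test stops at the first failing assertion. A hypothetical usage sketch (decodeImageRef is illustrative):

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

// decodeImageRef is a stand-in that always rejects its input.
func decodeImageRef(ref string) error {
	return errors.New("invalid reference format: " + ref)
}

func TestDecodeImageRef(t *testing.T) {
	err := decodeImageRef("???")
	require.ErrorContains(t, err, "invalid reference format")
	// Only reached if the assertion above held; a mismatch would have
	// aborted the test via t.FailNow().
	require.Errorf(t, err, "expected decodeImageRef(%q) to fail", "???")
}
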
diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go
index ed54a9d83..5bb07c89c 100644
--- a/vendor/github.com/stretchr/testify/require/require_forward.go
+++ b/vendor/github.com/stretchr/testify/require/require_forward.go
@@ -223,6 +223,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ..
ErrorAsf(a.t, err, target, msg, args...)
}
+// ErrorContains asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// a.ErrorContains(err, expectedErrorSubString)
+func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ ErrorContains(a.t, theError, contains, msgAndArgs...)
+}
+
+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted")
+func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ ErrorContainsf(a.t, theError, contains, msg, args...)
+}
+
// ErrorIs asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) {
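
The method forms above let the same assertions hang off a require.Assertions value bound to the test's TestingT. A brief sketch, again with illustrative values:

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestWithForwarder(t *testing.T) {
	r := require.New(t)
	err := errors.New("dial tcp 127.0.0.1:9090: connection refused")
	r.ErrorContains(err, "connection refused")
	r.ErrorContainsf(err, "dial tcp", "unexpected error: %v", err)
}
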
diff --git a/vendor/modules.txt b/vendor/modules.txt
index a224d3da5..62fa45067 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -69,7 +69,7 @@ github.com/containerd/containerd/log
github.com/containerd/containerd/pkg/userns
github.com/containerd/containerd/platforms
github.com/containerd/containerd/sys
-# github.com/containerd/stargz-snapshotter/estargz v0.11.2
+# github.com/containerd/stargz-snapshotter/estargz v0.11.3
github.com/containerd/stargz-snapshotter/estargz
github.com/containerd/stargz-snapshotter/estargz/errorutil
# github.com/containernetworking/cni v1.0.1
@@ -109,7 +109,7 @@ github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/sshagent
github.com/containers/buildah/pkg/util
github.com/containers/buildah/util
-# github.com/containers/common v0.47.5-0.20220228211119-9880eb424fde
+# github.com/containers/common v0.47.5-0.20220318125043-0ededd18a1f9
## explicit
github.com/containers/common/libimage
github.com/containers/common/libimage/manifests
@@ -206,7 +206,7 @@ github.com/containers/image/v5/types
github.com/containers/image/v5/version
# github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a
github.com/containers/libtrust
-# github.com/containers/ocicrypt v1.1.2
+# github.com/containers/ocicrypt v1.1.3
## explicit
github.com/containers/ocicrypt
github.com/containers/ocicrypt/blockcipher
@@ -233,7 +233,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.38.3-0.20220308085612-93ce26691863
+# github.com/containers/storage v1.38.3-0.20220321121613-8e565392dd91
## explicit
github.com/containers/storage
github.com/containers/storage/drivers
@@ -455,7 +455,7 @@ github.com/jinzhu/copier
# github.com/json-iterator/go v1.1.12
## explicit
github.com/json-iterator/go
-# github.com/klauspost/compress v1.15.0
+# github.com/klauspost/compress v1.15.1
github.com/klauspost/compress
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
@@ -478,7 +478,7 @@ github.com/mattn/go-runewidth
github.com/mattn/go-shellwords
# github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369
github.com/matttproud/golang_protobuf_extensions/pbutil
-# github.com/miekg/pkcs11 v1.0.3
+# github.com/miekg/pkcs11 v1.1.1
github.com/miekg/pkcs11
# github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
github.com/mistifyio/go-zfs
@@ -603,7 +603,8 @@ github.com/pkg/errors
github.com/pmezard/go-difflib/difflib
# github.com/proglottis/gpgme v0.1.1
github.com/proglottis/gpgme
-# github.com/prometheus/client_golang v1.11.0
+# github.com/prometheus/client_golang v1.11.1
+## explicit
github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/promhttp
@@ -647,7 +648,7 @@ github.com/spf13/cobra
github.com/spf13/pflag
# github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980
github.com/stefanberger/go-pkcs11uri
-# github.com/stretchr/testify v1.7.0
+# github.com/stretchr/testify v1.7.1
## explicit
github.com/stretchr/testify/assert
github.com/stretchr/testify/require