Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/.golangci.yml | 3
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/go.mod | 2
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/go.sum | 3
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go | 16
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go | 56
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go | 44
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go | 10
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go | 2
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go | 46
-rw-r--r--  vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go | 9
-rw-r--r--  vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go | 2
-rw-r--r--  vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod | 2
-rw-r--r--  vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum | 4
-rw-r--r--  vendor/github.com/containers/buildah/.cirrus.yml | 62
-rw-r--r--  vendor/github.com/containers/buildah/.gitignore | 4
-rw-r--r--  vendor/github.com/containers/buildah/.golangci.yml | 19
-rw-r--r--  vendor/github.com/containers/buildah/CHANGELOG.md | 156
-rw-r--r--  vendor/github.com/containers/buildah/Makefile | 1
-rw-r--r--  vendor/github.com/containers/buildah/bind/mount.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/changelog.txt | 155
-rw-r--r--  vendor/github.com/containers/buildah/chroot/run.go | 8
-rw-r--r--  vendor/github.com/containers/buildah/define/types.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/go.mod | 14
-rw-r--r--  vendor/github.com/containers/buildah/go.sum | 116
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/executor.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/internal/parse/parse.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cli/common.go | 14
-rw-r--r--  vendor/github.com/containers/buildah/pkg/overlay/overlay.go | 58
-rw-r--r--  vendor/github.com/containers/buildah/pkg/parse/parse.go | 80
-rw-r--r--  vendor/github.com/containers/buildah/run_linux.go | 47
-rw-r--r--  vendor/github.com/containers/buildah/util.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/util/util.go | 47
-rw-r--r--  vendor/github.com/containers/common/libnetwork/cni/config.go | 2
-rw-r--r--  vendor/github.com/containers/common/libnetwork/netavark/config.go | 2
-rw-r--r--  vendor/github.com/containers/common/libnetwork/netavark/exec.go | 2
-rw-r--r--  vendor/github.com/containers/common/libnetwork/netavark/network.go | 16
-rw-r--r--  vendor/github.com/containers/common/libnetwork/netavark/run.go | 5
-rw-r--r--  vendor/github.com/containers/common/libnetwork/network/interface.go | 14
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config.go | 20
-rw-r--r--  vendor/github.com/containers/common/pkg/config/containers.conf | 18
-rw-r--r--  vendor/github.com/containers/common/pkg/config/default.go | 41
-rw-r--r--  vendor/github.com/containers/common/pkg/config/default_linux.go | 6
-rw-r--r--  vendor/github.com/containers/common/pkg/config/default_unsupported.go | 6
-rw-r--r--  vendor/github.com/containers/common/pkg/config/default_windows.go | 6
-rw-r--r--  vendor/github.com/containers/common/pkg/flag/flag.go | 2
-rw-r--r--  vendor/github.com/containers/common/pkg/parse/parse.go | 20
-rw-r--r--  vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go | 2
-rw-r--r--  vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go | 33
-rw-r--r--  vendor/github.com/containers/common/version/version.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/copy/copy.go | 5
-rw-r--r--  vendor/github.com/containers/image/v5/copy/sign.go | 4
-rw-r--r--  vendor/github.com/containers/image/v5/manifest/common.go | 4
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/docker/config/config.go | 4
-rw-r--r--  vendor/github.com/containers/image/v5/sif/load.go | 211
-rw-r--r--  vendor/github.com/containers/image/v5/sif/src.go | 217
-rw-r--r--  vendor/github.com/containers/image/v5/sif/transport.go | 164
-rw-r--r--  vendor/github.com/containers/image/v5/signature/docker.go | 30
-rw-r--r--  vendor/github.com/containers/image/v5/signature/mechanism.go | 11
-rw-r--r--  vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go | 37
-rw-r--r--  vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go | 12
-rw-r--r--  vendor/github.com/containers/image/v5/signature/signature.go | 10
-rw-r--r--  vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go | 2
-rw-r--r--  vendor/github.com/containers/image/v5/version/version.go | 2
-rw-r--r--  vendor/github.com/containers/psgo/go.mod | 5
-rw-r--r--  vendor/github.com/containers/psgo/go.sum | 972
-rw-r--r--  vendor/github.com/containers/psgo/internal/proc/ns.go | 16
-rw-r--r--  vendor/github.com/containers/psgo/internal/proc/status.go | 102
-rw-r--r--  vendor/github.com/containers/psgo/psgo.go | 35
-rw-r--r--  vendor/github.com/containers/storage/Makefile | 3
-rw-r--r--  vendor/github.com/containers/storage/VERSION | 2
-rw-r--r--  vendor/github.com/containers/storage/drivers/fsdiff.go | 1
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go | 13
-rw-r--r--  vendor/github.com/containers/storage/go.mod | 12
-rw-r--r--  vendor/github.com/containers/storage/go.sum | 30
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive.go | 9
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/cache_linux.go | 630
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go | 310
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go | 81
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/internal/compression.go | 32
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/storage_linux.go | 961
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/idtools.go | 36
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go | 6
-rw-r--r--  vendor/github.com/containers/storage/store.go | 46
-rw-r--r--  vendor/github.com/containers/storage/types/options.go | 41
-rw-r--r--  vendor/github.com/docker/docker/opts/address_pools.go | 84
-rw-r--r--  vendor/github.com/docker/docker/opts/env.go | 30
-rw-r--r--  vendor/github.com/docker/docker/opts/hosts.go | 183
-rw-r--r--  vendor/github.com/docker/docker/opts/hosts_unix.go | 11
-rw-r--r--  vendor/github.com/docker/docker/opts/hosts_windows.go | 60
-rw-r--r--  vendor/github.com/docker/docker/opts/ip.go | 47
-rw-r--r--  vendor/github.com/docker/docker/opts/opts.go | 348
-rw-r--r--  vendor/github.com/docker/docker/opts/quotedstring.go | 37
-rw-r--r--  vendor/github.com/docker/docker/opts/runtime.go | 79
-rw-r--r--  vendor/github.com/docker/docker/opts/ulimit.go | 81
-rw-r--r--  vendor/github.com/docker/libnetwork/ipamutils/utils.go | 135
-rw-r--r--  vendor/github.com/jinzhu/copier/copier.go | 125
-rw-r--r--  vendor/github.com/klauspost/compress/.goreleaser.yml | 4
-rw-r--r--  vendor/github.com/klauspost/compress/README.md | 14
-rw-r--r--  vendor/github.com/klauspost/compress/flate/deflate.go | 169
-rw-r--r--  vendor/github.com/klauspost/compress/flate/fast_encoder.go | 2
-rw-r--r--  vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go | 199
-rw-r--r--  vendor/github.com/klauspost/compress/flate/huffman_code.go | 4
-rw-r--r--  vendor/github.com/klauspost/compress/flate/inflate.go | 9
-rw-r--r--  vendor/github.com/klauspost/compress/flate/token.go | 19
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress.go | 234
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/bitreader.go | 15
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/bitwriter.go | 22
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/blockdec.go | 24
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/blockenc.go | 108
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/decodeheader.go | 84
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_base.go | 24
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_fast.go | 139
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/encoder_options.go | 10
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/fse_decoder.go | 2
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/fse_encoder.go | 5
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s | 1
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s | 186
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go (renamed from vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go) | 8
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go | 4
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/seqdec.go | 4
-rw-r--r--  vendor/github.com/mtrmac/gpgme/go.mod | 3
-rw-r--r--  vendor/github.com/proglottis/gpgme/.appveyor.yml (renamed from vendor/github.com/mtrmac/gpgme/.appveyor.yml) | 0
-rw-r--r--  vendor/github.com/proglottis/gpgme/.gitignore (renamed from vendor/github.com/mtrmac/gpgme/.gitignore) | 0
-rw-r--r--  vendor/github.com/proglottis/gpgme/.travis.yml (renamed from vendor/github.com/mtrmac/gpgme/.travis.yml) | 0
-rw-r--r--  vendor/github.com/proglottis/gpgme/LICENSE (renamed from vendor/github.com/mtrmac/gpgme/LICENSE) | 0
-rw-r--r--  vendor/github.com/proglottis/gpgme/README.md (renamed from vendor/github.com/mtrmac/gpgme/README.md) | 0
-rw-r--r--  vendor/github.com/proglottis/gpgme/callbacks.go (renamed from vendor/github.com/mtrmac/gpgme/callbacks.go) | 0
-rw-r--r--  vendor/github.com/proglottis/gpgme/data.go (renamed from vendor/github.com/mtrmac/gpgme/data.go) | 0
-rw-r--r--  vendor/github.com/proglottis/gpgme/go.mod | 3
-rw-r--r--  vendor/github.com/proglottis/gpgme/go_gpgme.c (renamed from vendor/github.com/mtrmac/gpgme/go_gpgme.c) | 0
-rw-r--r--  vendor/github.com/proglottis/gpgme/go_gpgme.h (renamed from vendor/github.com/mtrmac/gpgme/go_gpgme.h) | 5
-rw-r--r--  vendor/github.com/proglottis/gpgme/gpgme.go (renamed from vendor/github.com/mtrmac/gpgme/gpgme.go) | 36
-rw-r--r--  vendor/github.com/proglottis/gpgme/unset_agent_info.go (renamed from vendor/github.com/mtrmac/gpgme/unset_agent_info.go) | 0
-rw-r--r--  vendor/github.com/proglottis/gpgme/unset_agent_info_windows.go (renamed from vendor/github.com/mtrmac/gpgme/unset_agent_info_windows.go) | 0
-rw-r--r--  vendor/github.com/sylabs/sif/v2/LICENSE.md | 29
-rw-r--r--  vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go | 69
-rw-r--r--  vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go | 103
-rw-r--r--  vendor/github.com/sylabs/sif/v2/pkg/sif/create.go | 680
-rw-r--r--  vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go | 267
-rw-r--r--  vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go | 300
-rw-r--r--  vendor/github.com/sylabs/sif/v2/pkg/sif/load.go | 174
-rw-r--r--  vendor/github.com/sylabs/sif/v2/pkg/sif/select.go | 210
-rw-r--r--  vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go | 364
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/bar.go | 93
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go | 37
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go | 10
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/bar_option.go | 5
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go | 6
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go | 12
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/decor/percentage.go | 9
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/decor/size_type.go | 22
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/decor/speed.go | 3
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/go.mod | 2
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/go.sum | 4
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/progress.go | 34
-rw-r--r--  vendor/github.com/vbauerster/mpb/v7/proxyreader.go | 45
156 files changed, 8849 insertions, 1433 deletions
diff --git a/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/vendor/github.com/Microsoft/hcsshim/.golangci.yml
index 16b25be55..2400e7f1e 100644
--- a/vendor/github.com/Microsoft/hcsshim/.golangci.yml
+++ b/vendor/github.com/Microsoft/hcsshim/.golangci.yml
@@ -1,3 +1,6 @@
+run:
+ timeout: 8m
+
linters:
enable:
- stylecheck
diff --git a/vendor/github.com/Microsoft/hcsshim/go.mod b/vendor/github.com/Microsoft/hcsshim/go.mod
index 7c9747667..9c60dd302 100644
--- a/vendor/github.com/Microsoft/hcsshim/go.mod
+++ b/vendor/github.com/Microsoft/hcsshim/go.mod
@@ -29,7 +29,7 @@ require (
go.opencensus.io v0.22.3
golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
- golang.org/x/sys v0.0.0-20210510120138-977fb7262007
+ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
google.golang.org/grpc v1.40.0
)
diff --git a/vendor/github.com/Microsoft/hcsshim/go.sum b/vendor/github.com/Microsoft/hcsshim/go.sum
index 7c383806d..93c37657f 100644
--- a/vendor/github.com/Microsoft/hcsshim/go.sum
+++ b/vendor/github.com/Microsoft/hcsshim/go.sum
@@ -812,8 +812,9 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go
index 644f0ab71..e21354ffd 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go
@@ -78,6 +78,13 @@ var (
// ErrNotSupported is an error encountered when hcs doesn't support the request
ErrPlatformNotSupported = errors.New("unsupported platform request")
+
+ // ErrProcessAlreadyStopped is returned by hcs if the process we're trying to kill has already been stopped.
+ ErrProcessAlreadyStopped = syscall.Errno(0x8037011f)
+
+ // ErrInvalidHandle is an error that can be encountered when querying the properties of a compute system after the handle to that
+ // compute system has already been closed.
+ ErrInvalidHandle = syscall.Errno(0x6)
)
type ErrorEvent struct {
@@ -249,6 +256,14 @@ func IsNotExist(err error) bool {
err == ErrElementNotFound
}
+// IsErrorInvalidHandle checks whether the error is the result of an operation carried
+// out on a handle that is invalid/closed. This error popped up while trying to query
+// stats on a container in the process of being stopped.
+func IsErrorInvalidHandle(err error) bool {
+ err = getInnerError(err)
+ return err == ErrInvalidHandle
+}
+
// IsAlreadyClosed checks if an error is caused by the Container or Process having been
// already closed by a call to the Close() method.
func IsAlreadyClosed(err error) bool {
@@ -281,6 +296,7 @@ func IsTimeout(err error) bool {
func IsAlreadyStopped(err error) bool {
err = getInnerError(err)
return err == ErrVmcomputeAlreadyStopped ||
+ err == ErrProcessAlreadyStopped ||
err == ErrElementNotFound
}
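
The new predicate follows the same pattern as the existing IsNotExist/IsAlreadyStopped helpers: unwrap the inner error, then compare it against a known errno. A minimal sketch of how a caller inside the module might combine them when querying a container that may be mid-teardown (the helper name and the query callback are illustrative, not part of this change):

package example

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/hcs"
)

// statsOrNil runs some query against a compute system (typically a
// properties/statistics call) and treats the "handle already closed" and
// "already stopped" error families as a benign empty result rather than a
// failure.
func statsOrNil(ctx context.Context, query func(context.Context) error) error {
	switch err := query(ctx); {
	case err == nil:
		return nil
	case hcs.IsErrorInvalidHandle(err), hcs.IsAlreadyStopped(err), hcs.IsNotExist(err):
		// The container is gone or is being torn down; report "no stats".
		return nil
	default:
		return err
	}
}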
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
index 8f2034668..f4605922a 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go
@@ -3,7 +3,9 @@ package hcs
import (
"context"
"encoding/json"
+ "errors"
"io"
+ "os"
"sync"
"syscall"
"time"
@@ -16,16 +18,17 @@ import (
// ContainerError is an error encountered in HCS
type Process struct {
- handleLock sync.RWMutex
- handle vmcompute.HcsProcess
- processID int
- system *System
- hasCachedStdio bool
- stdioLock sync.Mutex
- stdin io.WriteCloser
- stdout io.ReadCloser
- stderr io.ReadCloser
- callbackNumber uintptr
+ handleLock sync.RWMutex
+ handle vmcompute.HcsProcess
+ processID int
+ system *System
+ hasCachedStdio bool
+ stdioLock sync.Mutex
+ stdin io.WriteCloser
+ stdout io.ReadCloser
+ stderr io.ReadCloser
+ callbackNumber uintptr
+ killSignalDelivered bool
closedWaitOnce sync.Once
waitBlock chan struct{}
@@ -149,12 +152,45 @@ func (process *Process) Kill(ctx context.Context) (bool, error) {
return false, makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
+ if process.killSignalDelivered {
+ // A kill signal has already been sent to this process. Sending a second
+ // one offers no real benefit, as processes cannot stop themselves from
+ // being terminated, once a TerminateProcess has been issued. Sending a
+ // second kill may result in a number of errors (two of which detailed bellow)
+ // and which we can avoid handling.
+ return true, nil
+ }
+
resultJSON, err := vmcompute.HcsTerminateProcess(ctx, process.handle)
+ if err != nil {
+ // We still need to check these two cases, as processes may still be killed by an
+ // external actor (a human operator, the OOM killer, a random script, etc.).
+ if errors.Is(err, os.ErrPermission) || IsAlreadyStopped(err) {
+ // There are two cases where it should be safe to ignore an error returned
+ // by HcsTerminateProcess. The first one is caused by the fact that
+ // HcsTerminateProcess ends up calling TerminateProcess in the context
+ // of a container. According to the TerminateProcess documentation:
+ // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-terminateprocess#remarks
+ // After a process has terminated, call to TerminateProcess with open
+ // handles to the process fails with ERROR_ACCESS_DENIED (5) error code.
+ // It's safe to ignore this error here. HCS should always have permissions
+ // to kill processes inside any container. So an ERROR_ACCESS_DENIED
+ // is unlikely to be anything other than what the closing remarks in the
+ // documentation state.
+ //
+ // The second case is generated by hcs itself, if for any reason HcsTerminateProcess
+ // is called twice in a very short amount of time. In such cases, hcs may return
+ // HCS_E_PROCESS_ALREADY_STOPPED.
+ return true, nil
+ }
+ }
events := processHcsResult(ctx, resultJSON)
delivered, err := process.processSignalResult(ctx, err)
if err != nil {
err = makeProcessError(process, operation, err, events)
}
+
+ process.killSignalDelivered = delivered
return delivered, err
}
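
In short, the first Kill that reaches HCS records killSignalDelivered, and any later call short-circuits to (true, nil); ERROR_ACCESS_DENIED and HCS_E_PROCESS_ALREADY_STOPPED from a racing external termination are likewise mapped to success. A sketch of the calling pattern this guard is meant to make safe (the wrapper function is hypothetical, not part of the diff):

package example

import (
	"context"

	"github.com/Microsoft/hcsshim/internal/hcs"
)

// forceStop shows why repeated Kill calls are now harmless.
func forceStop(ctx context.Context, p *hcs.Process) error {
	// First call delivers TerminateProcess to the process in the guest.
	if _, err := p.Kill(ctx); err != nil {
		return err
	}
	// A second call, e.g. from a cleanup path racing with the first, is now a
	// cheap no-op: the killSignalDelivered guard returns (true, nil) instead
	// of surfacing ERROR_ACCESS_DENIED from a repeated terminate.
	_, err := p.Kill(ctx)
	return err
}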
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go
new file mode 100644
index 000000000..def952541
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go
@@ -0,0 +1,44 @@
+package winapi
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+const PSEUDOCONSOLE_INHERIT_CURSOR = 0x1
+
+// CreatePseudoConsole creates a windows pseudo console.
+func CreatePseudoConsole(size windows.Coord, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) error {
+ // We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand.
+ return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), hInput, hOutput, dwFlags, hpcon)
+}
+
+// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`.
+func ResizePseudoConsole(hpcon windows.Handle, size windows.Coord) error {
+ // We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand.
+ return resizePseudoConsole(hpcon, *((*uint32)(unsafe.Pointer(&size))))
+}
+
+// HRESULT WINAPI CreatePseudoConsole(
+// _In_ COORD size,
+// _In_ HANDLE hInput,
+// _In_ HANDLE hOutput,
+// _In_ DWORD dwFlags,
+// _Out_ HPCON* phPC
+// );
+//
+//sys createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) = kernel32.CreatePseudoConsole
+
+// void WINAPI ClosePseudoConsole(
+// _In_ HPCON hPC
+// );
+//
+//sys ClosePseudoConsole(hpc windows.Handle) = kernel32.ClosePseudoConsole
+
+// HRESULT WINAPI ResizePseudoConsole(
+// _In_ HPCON hPC ,
+// _In_ COORD size
+// );
+//
+//sys resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole
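
Because the Win32 API takes COORD by value, the exported wrappers pack the two int16 fields of windows.Coord into a single uint32 before calling the generated stubs. A sketch of using the wrappers from inside the module (the helper and the pipe handles are assumptions, not part of this file):

package example

import (
	"github.com/Microsoft/hcsshim/internal/winapi"
	"golang.org/x/sys/windows"
)

// newPty creates an 80x25 pseudo console over an existing pipe pair and then
// grows it, e.g. after a window-resize event. hIn/hOut are assumed to be pipe
// handles created elsewhere (for instance with windows.CreatePipe).
func newPty(hIn, hOut windows.Handle) (windows.Handle, error) {
	var hpcon windows.Handle
	if err := winapi.CreatePseudoConsole(windows.Coord{X: 80, Y: 25}, hIn, hOut, 0, &hpcon); err != nil {
		return 0, err
	}
	if err := winapi.ResizePseudoConsole(hpcon, windows.Coord{X: 120, Y: 40}); err != nil {
		winapi.ClosePseudoConsole(hpcon)
		return 0, err
	}
	return hpcon, nil
}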
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
index b87068327..37839435b 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
@@ -2,9 +2,7 @@ package winapi
const PROCESS_ALL_ACCESS uint32 = 2097151
-// DWORD GetProcessImageFileNameW(
-// HANDLE hProcess,
-// LPWSTR lpImageFileName,
-// DWORD nSize
-// );
-//sys GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) = kernel32.GetProcessImageFileNameW
+const (
+ PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016
+ PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D
+)
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go
index ec88c0d21..1d4ba3c4f 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go
@@ -2,4 +2,4 @@
// be thought of as an extension to golang.org/x/sys/windows.
package winapi
-//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go
+//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go console.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
index 59ddee274..4eb64b4c0 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
@@ -37,12 +37,15 @@ func errnoErr(e syscall.Errno) error {
}
var (
+ modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
modntdll = windows.NewLazySystemDLL("ntdll.dll")
modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll")
- modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll")
+ procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole")
+ procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole")
+ procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole")
procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation")
procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId")
procSearchPathW = modkernel32.NewProc("SearchPathW")
@@ -58,7 +61,6 @@ var (
procLogonUserW = modadvapi32.NewProc("LogonUserW")
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
procLocalFree = modkernel32.NewProc("LocalFree")
- procGetProcessImageFileNameW = modkernel32.NewProc("GetProcessImageFileNameW")
procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount")
procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA")
procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA")
@@ -71,6 +73,33 @@ var (
procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError")
)
+func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) {
+ r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0)
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
+ }
+ return
+}
+
+func ClosePseudoConsole(hpc windows.Handle) {
+ syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0)
+ return
+}
+
+func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) {
+ r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0)
+ if int32(r0) < 0 {
+ if r0&0x1fff0000 == 0x00070000 {
+ r0 &= 0xffff
+ }
+ hr = syscall.Errno(r0)
+ }
+ return
+}
+
func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) {
r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
status = uint32(r0)
@@ -227,19 +256,6 @@ func LocalFree(ptr uintptr) {
return
}
-func GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetProcessImageFileNameW.Addr(), 3, uintptr(hProcess), uintptr(unsafe.Pointer(imageFileName)), uintptr(nSize))
- size = uint32(r0)
- if size == 0 {
- if e1 != 0 {
- err = errnoErr(e1)
- } else {
- err = syscall.EINVAL
- }
- }
- return
-}
-
func GetActiveProcessorCount(groupNumber uint16) (amount uint32) {
r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
amount = uint32(r0)
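
The generated stubs treat a negative HRESULT as failure; when the facility bits indicate FACILITY_WIN32 (0x0007), the low 16 bits carry the original Win32 error code, so masking with 0xffff recovers it. A standalone illustration of that decoding (not part of the generated file):

package main

import (
	"fmt"
	"syscall"
)

// decodeHRESULT mirrors the error handling in the generated stubs: a negative
// HRESULT is an error, and for FACILITY_WIN32 results the low word holds the
// original Win32 error code.
func decodeHRESULT(r0 uintptr) error {
	if int32(r0) >= 0 {
		return nil
	}
	if r0&0x1fff0000 == 0x00070000 {
		r0 &= 0xffff
	}
	return syscall.Errno(r0)
}

func main() {
	// 0x80070005 is HRESULT_FROM_WIN32(ERROR_ACCESS_DENIED); on Windows the
	// resulting value is syscall.Errno(5).
	fmt.Println(decodeHRESULT(0x80070005))
}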
diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
index 49fb740cd..75dce5d82 100644
--- a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
+++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
@@ -38,4 +38,13 @@ const (
// V21H1 corresponds to Windows Server 21H1 (semi-annual channel).
V21H1 = 19043
+
+ // V21H2Win10 corresponds to Windows 10 (November 2021 Update).
+ V21H2Win10 = 19044
+
+ // V21H2Server corresponds to Windows Server 2022 (ltsc2022).
+ V21H2Server = 20348
+
+ // V21H2Win11 corresponds to Windows 11 (original release).
+ V21H2Win11 = 22000
)
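
These constants are typically compared against the running host's build number. A sketch of gating a feature on Windows Server 2022 or newer (the gate function is hypothetical; it assumes the package's Build helper, which is Windows-only):

package example

import "github.com/Microsoft/hcsshim/osversion"

// supportsFeatureX reports whether the host is at least the Windows Server
// 2022 (ltsc2022) build. osversion.Build reads the running kernel's build
// number, so this only compiles and runs on Windows.
func supportsFeatureX() bool {
	return osversion.Build() >= osversion.V21H2Server
}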
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
index e56319545..2c36e89b4 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
@@ -107,7 +107,7 @@ type Telemetry struct {
}
// Open opens a stargz file for reading.
-// The behaviour is configurable using options.
+// The behavior is configurable using options.
//
// Note that each entry name is normalized as the path that is relative to root.
func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) {
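
Since entry names are normalized relative to the root, a lookup for "bin/sh" also matches an entry recorded as "./bin/sh". A short sketch of opening a local eStargz blob and looking up one entry (the file path and the use of Reader.Lookup are illustrative):

package example

import (
	"fmt"
	"io"
	"os"

	"github.com/containerd/stargz-snapshotter/estargz"
)

// inspect opens an eStargz layer stored in a local file and prints one TOC
// entry if present.
func inspect(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		return err
	}
	r, err := estargz.Open(io.NewSectionReader(f, 0, fi.Size()))
	if err != nil {
		return err
	}
	if e, ok := r.Lookup("bin/sh"); ok {
		fmt.Println(e.Name, e.Size)
	}
	return nil
}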
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
index 144d022ba..b82879fd7 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
@@ -3,7 +3,7 @@ module github.com/containerd/stargz-snapshotter/estargz
go 1.16
require (
- github.com/klauspost/compress v1.13.6
+ github.com/klauspost/compress v1.14.2
github.com/opencontainers/go-digest v1.0.0
github.com/pkg/errors v0.9.1
github.com/vbatts/tar-split v0.11.2
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
index d3c934ff8..20433e16b 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
@@ -1,8 +1,8 @@
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
-github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw=
+github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml
index b3be43a2d..ec954630c 100644
--- a/vendor/github.com/containers/buildah/.cirrus.yml
+++ b/vendor/github.com/containers/buildah/.cirrus.yml
@@ -29,7 +29,7 @@ env:
PRIOR_FEDORA_NAME: "fedora-34"
UBUNTU_NAME: "ubuntu-2110"
- IMAGE_SUFFIX: "c6226133906620416"
+ IMAGE_SUFFIX: "c4560539387953152"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
@@ -128,13 +128,35 @@ vendor_task:
- './hack/tree_status.sh'
+# Confirm cross-compile ALL architectures on a Mac OS-X VM.
+cross_build_task:
+ name: "Cross Compile"
+ alias: cross_build
+ only_if: &not_docs $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
+
+ osx_instance:
+ image: 'big-sur-base'
+
+ script:
+ - brew update
+ - brew install go
+ - brew install go-md2man
+ - brew install gpgme
+ - go version
+ - make cross CGO_ENABLED=0
+
+ binary_artifacts:
+ path: ./bin/*
+
+
unit_task:
name: 'Unit tests w/ $STORAGE_DRIVER'
alias: unit
- only_if: &not_docs $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
- depends_on:
+ only_if: *not_docs
+ depends_on: &smoke_vendor_cross
- smoke
- vendor
+ - cross_build
timeout_in: 1h
@@ -156,8 +178,7 @@ conformance_task:
name: 'Build Conformance w/ $STORAGE_DRIVER'
alias: conformance
only_if: *not_docs
- depends_on:
- - unit
+ depends_on: *smoke_vendor_cross
gce_instance:
image_name: "${UBUNTU_CACHE_IMAGE_NAME}"
@@ -174,36 +195,11 @@ conformance_task:
conformance_test_script: '${SCRIPT_BASE}/test.sh conformance |& ${_TIMESTAMP}'
-# Confirm cross-compile ALL architectures on a Mac OS-X VM.
-cross_build_task:
- name: "Cross Compile"
- alias: cross_build
- only_if: *not_docs
- depends_on:
- - unit
-
- osx_instance:
- image: 'big-sur-base'
-
- script:
- - brew update
- - brew install go
- - brew install go-md2man
- - brew install gpgme
- - go version
- - make cross CGO_ENABLED=0
-
- binary_artifacts:
- path: ./bin/*
-
-
integration_task:
name: "Integration $DISTRO_NV w/ $STORAGE_DRIVER"
alias: integration
only_if: *not_docs
- depends_on:
- - smoke
- - vendor
+ depends_on: *smoke_vendor_cross
matrix:
# VFS
@@ -259,9 +255,7 @@ in_podman_task:
name: "Containerized Integration"
alias: in_podman
only_if: *not_docs
- depends_on:
- - smoke
- - vendor
+ depends_on: *smoke_vendor_cross
env:
# This is key, cause the scripts to re-execute themselves inside a container.
diff --git a/vendor/github.com/containers/buildah/.gitignore b/vendor/github.com/containers/buildah/.gitignore
index 55bf4069c..939ce6ef5 100644
--- a/vendor/github.com/containers/buildah/.gitignore
+++ b/vendor/github.com/containers/buildah/.gitignore
@@ -4,9 +4,9 @@ docs/*.5
/buildah
/imgtype
/build/
-tests/tools/build
+/tests/tools/build
Dockerfile*
!/tests/bud/*/Dockerfile*
!/tests/conformance/**/Dockerfile*
*.swp
-result
+/result/
diff --git a/vendor/github.com/containers/buildah/.golangci.yml b/vendor/github.com/containers/buildah/.golangci.yml
index 0c7e31007..af0b10c76 100644
--- a/vendor/github.com/containers/buildah/.golangci.yml
+++ b/vendor/github.com/containers/buildah/.golangci.yml
@@ -7,18 +7,7 @@ run:
# Don't exceed number of threads available when running under CI
concurrency: 4
linters:
- enable-all: true
- disable:
- # All these break for one reason or another
- - dupl
- - funlen
- - gochecknoglobals
- - gochecknoinits
- - goconst
- - gocritic
- - gocyclo
- - gosec
- - lll
- - maligned
- - prealloc
- - scopelint
+ enable:
+ - revive
+ - unconvert
+ - unparam
diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md
index 1864a4564..c2e2014c3 100644
--- a/vendor/github.com/containers/buildah/CHANGELOG.md
+++ b/vendor/github.com/containers/buildah/CHANGELOG.md
@@ -2,6 +2,162 @@
# Changelog
+## v1.24.0 (2022-01-26)
+
+ Update vendor of containers/common
+ build(deps): bump github.com/golangci/golangci-lint in /tests/tools
+ Github-workflow: Report both failures and errors.
+ build(deps): bump github.com/containers/image/v5 from 5.18.0 to 5.19.0
+ Update docs/buildah-build.1.md
+ [CI:DOCS] Fix typos and improve language
+ buildah bud --network add support for custom networks
+ Make pull commands be consistent
+ docs/buildah-build.1.md: don't imply that -v isn't just a RUN thing
+ build(deps): bump github.com/onsi/gomega from 1.17.0 to 1.18.0
+ Vendor in latest containers/image
+ Run codespell on code
+ .github/dependabot.yml: add tests/tools go.mod
+ CI: rm git-validation, add GHA job to validate PRs
+ tests/tools: bump go-md2man to v2.0.1
+ tests/tools/Makefile: simplify
+ tests/tools: bump onsi/ginkgo to v1.16.5
+ vendor: bump c/common and others
+ mount: add support for custom upper and workdir with overlay mounts
+ linux: fix lookup for runtime
+ overlay: add MountWithOptions to API which extends support for advanced overlay
+ Allow processing of SystemContext from FlagSet
+ .golangci.yml: enable unparam linter
+ util/resolveName: rm bool return
+ tests/tools: bump golangci-lint
+ .gitignore: fixups
+ all: fix capabilities.NewPid deprecation warnings
+ bind/mount.go: fix linter comment
+ all: fix gosimple warning S1039
+ tests/e2e/buildah_suite_test.go: fix gosimple warnings
+ imagebuildah/executor.go: fix gosimple warning
+ util.go: fix gosimple warning
+ build(deps): bump github.com/opencontainers/runc from 1.0.3 to 1.1.0
+ Enable git-daemon tests
+ Allow processing of id options from FlagSet
+ Cirrus: Re-order tasks for more parallelism
+ Cirrus: Freshen VM images
+ Fix platform handling for empty os/arch values
+ Allow processing of network options from FlagSet
+ Fix permissions on secrets directory
+ Update containers/image and containers/common
+ bud.bats: use a local git daemon for the git protocol test
+ Allow processing of common options from FlagSet
+ Cirrus: Run int. tests in parallel with unit
+ vendor c/common
+ Fix default CNI paths
+ build(deps): bump github.com/fsouza/go-dockerclient from 1.7.6 to 1.7.7
+ multi-stage: enable mounting stages across each other with selinux enabled
+ executor: Share selinux label of first stage with other stages in a build
+ buildkit: add from field to bind and cache mounts so images can be used as source
+ Use config.ProxyEnv from containers/common
+ use libnetwork from c/common for networking
+ setup the netns in the buildah parent process
+ build(deps): bump github.com/containerd/containerd from 1.5.8 to 1.5.9
+ build(deps): bump github.com/fsouza/go-dockerclient from 1.7.4 to 1.7.6
+ build: fix libsubid test
+ Allow callers to replace the ContainerSuffix
+ parse: allow parsing anomaly non-human value for memory control group
+ .cirrus: remove static_build from ci
+ stage_executor: re-use all possible layers from cache for squashed builds
+ build(deps): bump github.com/spf13/cobra from 1.2.1 to 1.3.0
+ Allow rootless buildah to set resource limits on cgroup V2
+ build(deps): bump github.com/docker/docker
+ tests: move buildkit mount tests files from TESTSDIR to TESTDIR before modification
+ build(deps): bump github.com/opencontainers/runc from 1.0.2 to 1.0.3
+ Wire logger through to config
+ copier.Put: check for is-not-a-directory using lstat, not stat
+ Turn on rootless cgroupv2 tests
+ Grab all of the containers.conf settings for namespaces.
+ image: set MediaType in OCI manifests
+ copier: RemoveAll possibly-directories
+ Simple README fix
+ images: accept multiple filter with logical AND
+ build(deps): bump github.com/containernetworking/cni from 0.8.1 to 1.0.1
+ UPdate vendor of container/storage
+ build(deps): bump github.com/onsi/gomega from 1.16.0 to 1.17.0
+ build(deps): bump github.com/containers/image/v5 from 5.16.1 to 5.17.0
+ Make LocalIP public function so Podman can use it
+ Fix UnsetEnv for buildah bud
+ Tests should rely only on static/unchanging images
+ run: ensure that stdio pipes are labeled correctly
+ build(deps): bump github.com/docker/docker
+ Cirrus: Bump up to Fedora 35 & Ubuntu 21.10
+ chroot: don't use the generate default seccomp filter for unit tests
+ build(deps): bump github.com/containerd/containerd from 1.5.7 to 1.5.8
+ ssh-agent: Increase timeout before we explicitly close connection
+ docs/tutorials: update
+ Clarify that manifest defaults to localhost as the registry name
+ "config": remove a stray bit of debug output
+ "commit": fix a flag typo
+ Fix an error message: unlocking vs locking
+ Expand the godoc for CommonBuildOptions.Secrets
+ chroot: accept an "rw" option
+ Add --unsetenv option to buildah commit and build
+ define.TempDirForURL(): show CombinedOutput when a command fails
+ config: support the variant field
+ rootless: do not bind mount /sys if not needed
+ Fix tutorial to specify command on buildah run line
+ build: history should not contain ARG values
+ docs: Use guaranteed path for go-md2man
+ run: honor --network=none from builder if nothing specified
+ networkpolicy: Should be enabled instead of default when explictly set
+ Add support for env var secret sources
+ build(deps): bump github.com/docker/docker
+ fix: another non-portable shebang
+ Rootless containers users should use additional groups
+ Support overlayfs path contains colon
+ Report ignorefile location when no content added
+ Add support for host.containers.internal in the /etc/hosts
+ build(deps): bump github.com/onsi/ginkgo from 1.16.4 to 1.16.5
+ imagebuildah: fix nil deref
+ buildkit: add support for mount=type=cache
+ Default secret mode to 400
+ [CI:DOCS] Include manifest example usage
+ docs: update buildah-from, buildah-pull 'platform' option compatibility notes
+ docs: update buildah-build 'platform' option compatibility notes
+ De-dockerize the man page as much as possible
+ [CI:DOCS] Touch up Containerfile man page to show ARG can be 1st
+ docs: Fix and Update Containerfile man page with supported mount types
+ mount: add tmpcopyup to tmpfs mount option
+ buildkit: Add support for --mount=type=tmpfs
+ build(deps): bump github.com/opencontainers/selinux from 1.8.5 to 1.9.1
+ Fix command doc links in README.md
+ build(deps): bump github.com/containers/image/v5 from 5.16.0 to 5.16.1
+ build: Add support for buildkit like --mount=type=bind
+ Bump containerd to v1.5.7
+ build(deps): bump github.com/docker/docker
+ tests: stop pulling php, composer
+ Fix .containerignore link file
+ Cirrus: Fix defunct package metadata breaking cache
+ build(deps): bump github.com/containers/storage from 1.36.0 to 1.37.0
+ buildah build: add --all-platforms
+ Add man page for Containerfile and .containerignore
+ Plumb the remote logger throughut Buildah
+ Replace fmt.Sprintf("%d", x) with strconv.Itoa(x)
+ Run: Cleanup run directory after every RUN step
+ build(deps): bump github.com/containers/common from 0.45.0 to 0.46.0
+ Makefile: adjust -ldflags/-gcflags/-gccgoflags depending on the go implementation
+ Makefile: check for `-race` using `-mod=vendor`
+ imagebuildah: fix an attempt to write to a nil map
+ push: support to specify the compression format
+ conformance: allow test cases to specify dockerUseBuildKit
+ build(deps): bump github.com/containers/common from 0.44.1 to 0.45.0
+ build(deps): bump github.com/containers/common from 0.44.0 to 0.44.1
+ unmarshalConvertedConfig(): handle zstd compression
+ tests/copy/copy: wire up compression options
+ Update to github.com/vbauerster/mpb v7.1.5
+ Add flouthoc to OWNERS
+ build: Add additional step nodes when labels are modified
+ Makefile: turn on race detection whenever it's available
+ conformance: add more tests for exclusion short-circuiting
+ Update VM Images + Drop prior-ubuntu testing
+ Bump to v1.24.0-dev
+
## v1.23.0 (2021-09-13)
Vendor in containers/common v0.44.0
diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile
index 196c60e29..f3d1ee817 100644
--- a/vendor/github.com/containers/buildah/Makefile
+++ b/vendor/github.com/containers/buildah/Makefile
@@ -111,7 +111,6 @@ codespell:
.PHONY: validate
validate: install.tools
./tests/validate/whitespace.sh
- ./tests/validate/git-validation.sh
./hack/xref-helpmsgs-manpages
./tests/validate/pr-should-include-tests
./tests/validate/buildahimages-are-sane
diff --git a/vendor/github.com/containers/buildah/bind/mount.go b/vendor/github.com/containers/buildah/bind/mount.go
index 789233405..0e45d12c2 100644
--- a/vendor/github.com/containers/buildah/bind/mount.go
+++ b/vendor/github.com/containers/buildah/bind/mount.go
@@ -270,7 +270,7 @@ func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error {
}
return errors.Wrapf(err, "error checking if %q is mounted", mount.Mountpoint)
}
- if uint64(mount.Major) != uint64(st.Dev) || uint64(mount.Minor) != uint64(st.Dev) { // nolint:unconvert (required for some OS/arch combinations)
+ if uint64(mount.Major) != uint64(st.Dev) || uint64(mount.Minor) != uint64(st.Dev) { //nolint:unconvert // (required for some OS/arch combinations)
logrus.Debugf("%q is apparently not really mounted, skipping", mount.Mountpoint)
continue
}
diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt
index 24a101016..bb8d03b74 100644
--- a/vendor/github.com/containers/buildah/changelog.txt
+++ b/vendor/github.com/containers/buildah/changelog.txt
@@ -1,3 +1,158 @@
+- Changelog for v1.24.0 (2022-01-26)
+ * Update vendor of containers/common
+ * build(deps): bump github.com/golangci/golangci-lint in /tests/tools
+ * Github-workflow: Report both failures and errors.
+ * build(deps): bump github.com/containers/image/v5 from 5.18.0 to 5.19.0
+ * Update docs/buildah-build.1.md
+ * [CI:DOCS] Fix typos and improve language
+ * buildah bud --network add support for custom networks
+ * Make pull commands be consistent
+ * docs/buildah-build.1.md: don't imply that -v isn't just a RUN thing
+ * build(deps): bump github.com/onsi/gomega from 1.17.0 to 1.18.0
+ * Vendor in latest containers/image
+ * Run codespell on code
+ * .github/dependabot.yml: add tests/tools go.mod
+ * CI: rm git-validation, add GHA job to validate PRs
+ * tests/tools: bump go-md2man to v2.0.1
+ * tests/tools/Makefile: simplify
+ * tests/tools: bump onsi/ginkgo to v1.16.5
+ * vendor: bump c/common and others
+ * mount: add support for custom upper and workdir with overlay mounts
+ * linux: fix lookup for runtime
+ * overlay: add MountWithOptions to API which extends support for advanced overlay
+ * Allow processing of SystemContext from FlagSet
+ * .golangci.yml: enable unparam linter
+ * util/resolveName: rm bool return
+ * tests/tools: bump golangci-lint
+ * .gitignore: fixups
+ * all: fix capabilities.NewPid deprecation warnings
+ * bind/mount.go: fix linter comment
+ * all: fix gosimple warning S1039
+ * tests/e2e/buildah_suite_test.go: fix gosimple warnings
+ * imagebuildah/executor.go: fix gosimple warning
+ * util.go: fix gosimple warning
+ * build(deps): bump github.com/opencontainers/runc from 1.0.3 to 1.1.0
+ * Enable git-daemon tests
+ * Allow processing of id options from FlagSet
+ * Cirrus: Re-order tasks for more parallelism
+ * Cirrus: Freshen VM images
+ * Fix platform handling for empty os/arch values
+ * Allow processing of network options from FlagSet
+ * Fix permissions on secrets directory
+ * Update containers/image and containers/common
+ * bud.bats: use a local git daemon for the git protocol test
+ * Allow processing of common options from FlagSet
+ * Cirrus: Run int. tests in parallel with unit
+ * vendor c/common
+ * Fix default CNI paths
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.6 to 1.7.7
+ * multi-stage: enable mounting stages across each other with selinux enabled
+ * executor: Share selinux label of first stage with other stages in a build
+ * buildkit: add from field to bind and cache mounts so images can be used as source
+ * Use config.ProxyEnv from containers/common
+ * use libnetwork from c/common for networking
+ * setup the netns in the buildah parent process
+ * build(deps): bump github.com/containerd/containerd from 1.5.8 to 1.5.9
+ * build(deps): bump github.com/fsouza/go-dockerclient from 1.7.4 to 1.7.6
+ * build: fix libsubid test
+ * Allow callers to replace the ContainerSuffix
+ * parse: allow parsing anomaly non-human value for memory control group
+ * .cirrus: remove static_build from ci
+ * stage_executor: re-use all possible layers from cache for squashed builds
+ * build(deps): bump github.com/spf13/cobra from 1.2.1 to 1.3.0
+ * Allow rootless buildah to set resource limits on cgroup V2
+ * build(deps): bump github.com/docker/docker
+ * tests: move buildkit mount tests files from TESTSDIR to TESTDIR before modification
+ * build(deps): bump github.com/opencontainers/runc from 1.0.2 to 1.0.3
+ * Wire logger through to config
+ * copier.Put: check for is-not-a-directory using lstat, not stat
+ * Turn on rootless cgroupv2 tests
+ * Grab all of the containers.conf settings for namespaces.
+ * image: set MediaType in OCI manifests
+ * copier: RemoveAll possibly-directories
+ * Simple README fix
+ * images: accept multiple filter with logical AND
+ * build(deps): bump github.com/containernetworking/cni from 0.8.1 to 1.0.1
+ * UPdate vendor of container/storage
+ * build(deps): bump github.com/onsi/gomega from 1.16.0 to 1.17.0
+ * build(deps): bump github.com/containers/image/v5 from 5.16.1 to 5.17.0
+ * Make LocalIP public function so Podman can use it
+ * Fix UnsetEnv for buildah bud
+ * Tests should rely only on static/unchanging images
+ * run: ensure that stdio pipes are labeled correctly
+ * build(deps): bump github.com/docker/docker
+ * Cirrus: Bump up to Fedora 35 & Ubuntu 21.10
+ * chroot: don't use the generate default seccomp filter for unit tests
+ * build(deps): bump github.com/containerd/containerd from 1.5.7 to 1.5.8
+ * ssh-agent: Increase timeout before we explicitly close connection
+ * docs/tutorials: update
+ * Clarify that manifest defaults to localhost as the registry name
+ * "config": remove a stray bit of debug output
+ * "commit": fix a flag typo
+ * Fix an error message: unlocking vs locking
+ * Expand the godoc for CommonBuildOptions.Secrets
+ * chroot: accept an "rw" option
+ * Add --unsetenv option to buildah commit and build
+ * define.TempDirForURL(): show CombinedOutput when a command fails
+ * config: support the variant field
+ * rootless: do not bind mount /sys if not needed
+ * Fix tutorial to specify command on buildah run line
+ * build: history should not contain ARG values
+ * docs: Use guaranteed path for go-md2man
+ * run: honor --network=none from builder if nothing specified
+ * networkpolicy: Should be enabled instead of default when explictly set
+ * Add support for env var secret sources
+ * build(deps): bump github.com/docker/docker
+ * fix: another non-portable shebang
+ * Rootless containers users should use additional groups
+ * Support overlayfs path contains colon
+ * Report ignorefile location when no content added
+ * Add support for host.containers.internal in the /etc/hosts
+ * build(deps): bump github.com/onsi/ginkgo from 1.16.4 to 1.16.5
+ * imagebuildah: fix nil deref
+ * buildkit: add support for mount=type=cache
+ * Default secret mode to 400
+ * [CI:DOCS] Include manifest example usage
+ * docs: update buildah-from, buildah-pull 'platform' option compatibility notes
+ * docs: update buildah-build 'platform' option compatibility notes
+ * De-dockerize the man page as much as possible
+ * [CI:DOCS] Touch up Containerfile man page to show ARG can be 1st
+ * docs: Fix and Update Containerfile man page with supported mount types
+ * mount: add tmpcopyup to tmpfs mount option
+ * buildkit: Add support for --mount=type=tmpfs
+ * build(deps): bump github.com/opencontainers/selinux from 1.8.5 to 1.9.1
+ * Fix command doc links in README.md
+ * build(deps): bump github.com/containers/image/v5 from 5.16.0 to 5.16.1
+ * build: Add support for buildkit like --mount=type=bind
+ * Bump containerd to v1.5.7
+ * build(deps): bump github.com/docker/docker
+ * tests: stop pulling php, composer
+ * Fix .containerignore link file
+ * Cirrus: Fix defunct package metadata breaking cache
+ * build(deps): bump github.com/containers/storage from 1.36.0 to 1.37.0
+ * buildah build: add --all-platforms
+ * Add man page for Containerfile and .containerignore
+ * Plumb the remote logger throughut Buildah
+ * Replace fmt.Sprintf("%d", x) with strconv.Itoa(x)
+ * Run: Cleanup run directory after every RUN step
+ * build(deps): bump github.com/containers/common from 0.45.0 to 0.46.0
+ * Makefile: adjust -ldflags/-gcflags/-gccgoflags depending on the go implementation
+ * Makefile: check for `-race` using `-mod=vendor`
+ * imagebuildah: fix an attempt to write to a nil map
+ * push: support to specify the compression format
+ * conformance: allow test cases to specify dockerUseBuildKit
+ * build(deps): bump github.com/containers/common from 0.44.1 to 0.45.0
+ * build(deps): bump github.com/containers/common from 0.44.0 to 0.44.1
+ * unmarshalConvertedConfig(): handle zstd compression
+ * tests/copy/copy: wire up compression options
+ * Update to github.com/vbauerster/mpb v7.1.5
+ * Add flouthoc to OWNERS
+ * build: Add additional step nodes when labels are modified
+ * Makefile: turn on race detection whenever it's available
+ * conformance: add more tests for exclusion short-circuiting
+ * Update VM Images + Drop prior-ubuntu testing
+ * Bump to v1.24.0-dev
+
- Changelog for v1.23.0 (2021-09-13)
* Vendor in containers/common v0.44.0
* build(deps): bump github.com/containers/storage from 1.35.0 to 1.36.0
diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go
index cf0d9871a..a373c4053 100644
--- a/vendor/github.com/containers/buildah/chroot/run.go
+++ b/vendor/github.com/containers/buildah/chroot/run.go
@@ -883,11 +883,14 @@ func setApparmorProfile(spec *specs.Spec) error {
// setCapabilities sets capabilities for ourselves, to be more or less inherited by any processes that we'll start.
func setCapabilities(spec *specs.Spec, keepCaps ...string) error {
- currentCaps, err := capability.NewPid(0)
+ currentCaps, err := capability.NewPid2(0)
if err != nil {
return errors.Wrapf(err, "error reading capabilities of current process")
}
- caps, err := capability.NewPid(0)
+ if err := currentCaps.Load(); err != nil {
+ return errors.Wrapf(err, "error loading capabilities")
+ }
+ caps, err := capability.NewPid2(0)
if err != nil {
return errors.Wrapf(err, "error reading capabilities of current process")
}
@@ -899,7 +902,6 @@ func setCapabilities(spec *specs.Spec, keepCaps ...string) error {
capability.AMBIENT: spec.Process.Capabilities.Ambient,
}
knownCaps := capability.List()
- caps.Clear(capability.CAPS | capability.BOUNDS | capability.AMBS)
for capType, capList := range capMap {
for _, capToSet := range capList {
cap := capability.CAP_LAST_CAP
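
capability.NewPid is deprecated because it allocates and loads in one step; NewPid2 only allocates, which is why the explicit Load() call is added before the current capability set is read. The pattern in isolation (a sketch; the helper and the CAP_SYS_ADMIN check are illustrative):

package example

import "github.com/syndtr/gocapability/capability"

// hasSysAdmin reports whether the current process holds CAP_SYS_ADMIN in its
// effective set, using the NewPid2 + Load pattern this change switches to.
func hasSysAdmin() (bool, error) {
	caps, err := capability.NewPid2(0) // 0 means "the current process"
	if err != nil {
		return false, err
	}
	if err := caps.Load(); err != nil { // NewPid2 allocates only; Load fills in the values
		return false, err
	}
	return caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN), nil
}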
diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go
index 5bbb1bbac..3d80c0160 100644
--- a/vendor/github.com/containers/buildah/define/types.go
+++ b/vendor/github.com/containers/buildah/define/types.go
@@ -29,7 +29,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.24.0-dev"
+ Version = "1.24.0"
// DefaultRuntime if containers.conf fails.
DefaultRuntime = "runc"
diff --git a/vendor/github.com/containers/buildah/go.mod b/vendor/github.com/containers/buildah/go.mod
index 6dd50e3ae..57193e63e 100644
--- a/vendor/github.com/containers/buildah/go.mod
+++ b/vendor/github.com/containers/buildah/go.mod
@@ -5,10 +5,10 @@ go 1.13
require (
github.com/containerd/containerd v1.5.9
github.com/containernetworking/cni v1.0.1
- github.com/containers/common v0.46.1-0.20220110165509-08c2c97e5e25
- github.com/containers/image/v5 v5.18.0
+ github.com/containers/common v0.47.1
+ github.com/containers/image/v5 v5.19.0
github.com/containers/ocicrypt v1.1.2
- github.com/containers/storage v1.37.1-0.20211122164443-82b8f06bfc08
+ github.com/containers/storage v1.38.1
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/docker v20.10.12+incompatible
github.com/docker/go-units v0.4.0
@@ -20,16 +20,16 @@ require (
github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
github.com/mattn/go-shellwords v1.0.12
github.com/onsi/ginkgo v1.16.5
- github.com/onsi/gomega v1.17.0
+ github.com/onsi/gomega v1.18.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84
- github.com/opencontainers/runc v1.0.3
+ github.com/opencontainers/runc v1.1.0
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/runtime-tools v0.9.0
github.com/opencontainers/selinux v1.10.0
github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656
github.com/pkg/errors v0.9.1
- github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf
+ github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921
github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v1.3.0
github.com/spf13/pflag v1.0.5
@@ -38,7 +38,7 @@ require (
go.etcd.io/bbolt v1.3.6
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
- golang.org/x/sys v0.0.0-20211214234402-4825e8c3871d
+ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b
k8s.io/klog v1.0.0 // indirect
)
diff --git a/vendor/github.com/containers/buildah/go.sum b/vendor/github.com/containers/buildah/go.sum
index 257af4905..d5c35c139 100644
--- a/vendor/github.com/containers/buildah/go.sum
+++ b/vendor/github.com/containers/buildah/go.sum
@@ -64,8 +64,8 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
-github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU=
+github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
@@ -88,14 +88,16 @@ github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
-github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0=
github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
-github.com/Microsoft/hcsshim v0.9.1 h1:VfDCj+QnY19ktX5TsH22JHcjaZ05RWQiwDbOyEg5ziM=
-github.com/Microsoft/hcsshim v0.9.1/go.mod h1:Y/0uV2jUab5kBI7SQgl62at0AVX7uaruzADAVmxm3eM=
+github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY=
+github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
+github.com/ProtonMail/go-crypto v0.0.0-20210920160938-87db9fbc61c7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
+github.com/ProtonMail/go-crypto v0.0.0-20211112122917-428f8eabeeb3/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
@@ -105,12 +107,14 @@ github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1o
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
+github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
@@ -118,6 +122,7 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV
github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -131,6 +136,7 @@ github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edY
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
@@ -148,6 +154,7 @@ github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cb
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
@@ -159,6 +166,7 @@ github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLI
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
@@ -193,6 +201,7 @@ github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -239,7 +248,6 @@ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFY
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
-github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
github.com/containerd/stargz-snapshotter/estargz v0.10.1 h1:hd1EoVjI2Ax8Cr64tdYqnJ4i4pZU49FkEf5kU8KxQng=
github.com/containerd/stargz-snapshotter/estargz v0.10.1/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
@@ -266,11 +274,10 @@ github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHV
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
github.com/containernetworking/plugins v1.0.1 h1:wwCfYbTCj5FC0EJgyzyjTXmqysOiJE9r712Z+2KVZAk=
github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
-github.com/containers/common v0.46.1-0.20220110165509-08c2c97e5e25 h1:LwpIG1dHUvMyuarbmR+KMLi4EF3Ca0afNw15KHN3rDM=
-github.com/containers/common v0.46.1-0.20220110165509-08c2c97e5e25/go.mod h1:hXUU9gtA8V9dSLHhizp/k/s0ZXBzrnUSScUfrsw8z2Y=
-github.com/containers/image/v5 v5.17.1-0.20220106205022-73f80d60f0e1/go.mod h1:daAiRXgcGIf/7eD7B2EkuHHw084/8M8Kh35rzOu56y0=
-github.com/containers/image/v5 v5.18.0 h1:YbvpXl5zd6IbZnt4XiOU0+c24xBQAQL9q3/e5kyk19k=
-github.com/containers/image/v5 v5.18.0/go.mod h1:ybujPwS7YEAPhLXJ3vvZGdKftk+sPSvp/djg9qTPvro=
+github.com/containers/common v0.47.1 h1:/TKIvnIDXvy9VdzWj1L9WLhe3ZEX9sRaA6L0It8ZyTM=
+github.com/containers/common v0.47.1/go.mod h1:vRjkTRres+O4i1k5brS7k2IV8uuiluCzmFRTMRZoyts=
+github.com/containers/image/v5 v5.19.0 h1:aEwc33qYjr2MpmBZlOgkCLu6dH465JTpWnFUCrjAXiQ=
+github.com/containers/image/v5 v5.19.0/go.mod h1:fpLUfLBBZpolJ0XOh7e/K0jkDuM4/8N19mfUgXfGo3Q=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -278,10 +285,9 @@ github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgU
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/containers/ocicrypt v1.1.2 h1:Ez+GAMP/4GLix5Ywo/fL7O0nY771gsBIigiqUm1aXz0=
github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
-github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
-github.com/containers/storage v1.37.1-0.20211119174841-bf170b3ddac0/go.mod h1:XjCNlt5JUUmRuTJXhFxHb9hHGPho7DNg3o4N/14prdQ=
-github.com/containers/storage v1.37.1-0.20211122164443-82b8f06bfc08 h1:KXVX/ZD4h0ysiz/E3oU5HrWnM9WkI6NGgliPC8IxoVk=
-github.com/containers/storage v1.37.1-0.20211122164443-82b8f06bfc08/go.mod h1:hvKpaiPRALDI7oz4Jx+AEch8iS/viRnc22HPilQROWU=
+github.com/containers/storage v1.38.0/go.mod h1:lBzt28gAk5ADZuRtwdndRJyqX22vnRaXmlF+7ktfMYc=
+github.com/containers/storage v1.38.1 h1:gg7YhHcxER0sh+kjXQcCZD6enu4VwAzliXq9JyM0g/Q=
+github.com/containers/storage v1.38.1/go.mod h1:lBzt28gAk5ADZuRtwdndRJyqX22vnRaXmlF+7ktfMYc=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
@@ -357,6 +363,7 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -372,6 +379,7 @@ github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -385,6 +393,12 @@ github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYis
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
+github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
+github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
+github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0=
+github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -496,6 +510,7 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
@@ -579,8 +594,10 @@ github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee h1:PAXLXk1heNZ5y
github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw=
-github.com/jinzhu/copier v0.3.4 h1:mfU6jI9PtCeUjkjQ322dlff9ELjGDu975C2p/nrubVI=
-github.com/jinzhu/copier v0.3.4/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
+github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
+github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg=
+github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
@@ -596,14 +613,17 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
+github.com/kevinburke/ssh_config v1.1.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.14.1 h1:hLQYb23E8/fO+1u53d02A97a8UnsddcvYzq4ERRU4ds=
+github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -614,6 +634,7 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
@@ -622,6 +643,7 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
+github.com/magefile/mage v1.11.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -631,6 +653,7 @@ github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7
github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA=
github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
+github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
@@ -689,14 +712,11 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
-github.com/mtrmac/gpgme v0.1.2 h1:dNOmvYmsrakgW7LcgiprD0yfRuQQe8/C8F6Z+zogO3s=
-github.com/mtrmac/gpgme v0.1.2/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgiVFNI=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
@@ -716,6 +736,8 @@ github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1ls
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.0.0 h1:CcuG/HvWNkkaqCUpJifQY8z7qEMBJya6aLPx6ftGyjQ=
+github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -725,8 +747,9 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
-github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.18.0 h1:ngbYoRctxjl8SiF7XgP0NxBFbfHcg3wfHMMaFHWwMTM=
+github.com/onsi/gomega v1.18.0/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -741,8 +764,9 @@ github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
-github.com/opencontainers/runc v1.0.3 h1:1hbqejyQWCJBvtKAfdO0b1FmaEf2z/bxnjqbARass5k=
github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.1.0 h1:O9+X96OcDjkmmZyfaG996kV7yq8HsoU2h1XRRQcefG8=
+github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -756,8 +780,6 @@ github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pK
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
-github.com/opencontainers/selinux v1.8.5/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo=
-github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU=
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/openshift/imagebuilder v1.2.2-0.20210415181909-87f3e48c2656 h1:WaxyNFpmIDu4i6so9r6LVFIbSaXqsj8oitMitt86ae4=
@@ -781,6 +803,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/proglottis/gpgme v0.1.1 h1:72xI0pt/hy7pqsRxk32KExITkXp+RZErRizsA+up/lQ=
+github.com/proglottis/gpgme v0.1.1/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
@@ -831,9 +855,15 @@ github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdh
github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
+github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
-github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf h1:b0+ZBD3rohnkQ4q5duD1+RyTXTg9yk+qTOPMSQtapO0=
-github.com/seccomp/libseccomp-golang v0.9.2-0.20200616122406-847368b35ebf/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921 h1:58EBmR2dMNL2n/FnbQewK3D14nXr0V9CObDSvMJLq+Y=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
+github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -877,6 +907,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/sylabs/release-tools v0.1.0/go.mod h1:pqP/z/11/rYMQ0OM/Nn7TxGijw7KfZwW9UolD/J1TUo=
+github.com/sylabs/sif/v2 v2.3.1 h1:NHoc/rZpnOS05etmT+j8IJOZP2Cc8zHHG8rKSVosvZs=
+github.com/sylabs/sif/v2 v2.3.1/go.mod h1:NnvveH62GiibimL00MrI6YYcZfb7DnZMcRo/40giY+0=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
@@ -897,8 +930,8 @@ github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
-github.com/vbauerster/mpb/v7 v7.3.0 h1:WwRtHHT26gjVln0yJypDEEpTWyX9sk4QcUxM6tQjdEc=
-github.com/vbauerster/mpb/v7 v7.3.0/go.mod h1:KERDXx9bfuStUwTH2FbsrJhJhVu1q+xmjjoCZMZrin4=
+github.com/vbauerster/mpb/v7 v7.3.2 h1:tCuxMy8G9cLdjb61b6wO7I1vRT/LyMEzRbr3xCC0JPU=
+github.com/vbauerster/mpb/v7 v7.3.2/go.mod h1:wfxIZcOJq/bG1/lAtfzMXcOiSvbqVi/5GX5WCSi+IsA=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
@@ -911,6 +944,8 @@ github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvV
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
+github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0=
+github.com/xanzy/ssh-agent v0.3.1/go.mod h1:QIE4lCeL7nkC25x+yA3LBIYfwCc1TFziCtG7cBAac6w=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b h1:6cLsL+2FW6dRAdl5iMtHgRogVCff0QpRi9653YmdcJA=
github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -959,6 +994,7 @@ go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -971,7 +1007,10 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 h1:0es+/5331RGQPcXlMfP+WrnIIS6dNnNRe0WB02W0F4M=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1058,13 +1097,14 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210929193557-e81a3d93ecf6/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1184,6 +1224,7 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1193,16 +1234,19 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211001092434-39dca1131b70/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211214234402-4825e8c3871d h1:1oIt9o40TWWI9FUaveVpUvBe13FNqBNVXy3ue2fcfkw=
-golang.org/x/sys v0.0.0-20211214234402-4825e8c3871d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -1394,7 +1438,6 @@ google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEc
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211005153810-c76a74d43a8e/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
@@ -1433,7 +1476,6 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
@@ -1457,8 +1499,9 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@@ -1473,6 +1516,7 @@ gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go
index aa33277f3..42cd3cf11 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go
@@ -304,9 +304,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
// and value, or just an argument, since they can be
// separated by either "=" or whitespace.
list := strings.SplitN(arg.Value, "=", 2)
- if _, stillUnused := exec.unusedArgs[list[0]]; stillUnused {
- delete(exec.unusedArgs, list[0])
- }
+ delete(exec.unusedArgs, list[0])
}
}
break
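
For context on the simplification above: Go's built-in delete is a no-op when the key is absent, so the removed existence check was redundant. A minimal standalone illustration (not part of the vendored code):

    package main

    import "fmt"

    func main() {
        unused := map[string]struct{}{"FOO": {}}
        // delete may be called whether or not the key exists;
        // deleting a missing key simply does nothing.
        delete(unused, "BAR")
        delete(unused, "FOO")
        fmt.Println(len(unused)) // 0
    }
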
diff --git a/vendor/github.com/containers/buildah/internal/parse/parse.go b/vendor/github.com/containers/buildah/internal/parse/parse.go
index 23e6aa884..8085cd097 100644
--- a/vendor/github.com/containers/buildah/internal/parse/parse.go
+++ b/vendor/github.com/containers/buildah/internal/parse/parse.go
@@ -316,7 +316,7 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
UID: uid,
GID: gid,
}
- //buildkit parity: change uid and gid if specificed otheriwise keep `0`
+	//buildkit parity: change uid and gid if specified, otherwise keep `0`
err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair)
if err != nil {
return newMount, errors.Wrapf(err, "Unable to change uid,gid of cache directory")
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index 704fa8b42..d05fbde7c 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -69,7 +69,7 @@ type BudResults struct {
Manifest string
NoCache bool
Timestamp int64
- Pull bool
+ Pull string
PullAlways bool
PullNever bool
Quiet bool
@@ -171,7 +171,7 @@ func GetNameSpaceFlagsCompletions() commonComp.FlagCompletions {
func GetLayerFlags(flags *LayerResults) pflag.FlagSet {
fs := pflag.FlagSet{}
fs.BoolVar(&flags.ForceRm, "force-rm", false, "Always remove intermediate containers after a build, even if the build is unsuccessful.")
- fs.BoolVar(&flags.Layers, "layers", UseLayers(), fmt.Sprintf("cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override."))
+ fs.BoolVar(&flags.Layers, "layers", UseLayers(), "cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override.")
return fs
}
@@ -214,9 +214,16 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs.StringVar(&flags.Manifest, "manifest", "", "add the image to the specified manifest list. Creates manifest list if it does not exist")
fs.BoolVar(&flags.NoCache, "no-cache", false, "Do not use existing cached images for the container build. Build from the start with a new set of cached layers.")
fs.String("os", runtime.GOOS, "set the OS to the provided value instead of the current operating system of the host")
- fs.BoolVar(&flags.Pull, "pull", true, "pull the image from the registry if newer or not present in store, if false, only pull the image if not present")
+	fs.StringVar(&flags.Pull, "pull", "true", "pull the image from the registry if newer or not present in store; if false, only pull the image if not present; if always, pull the image even if the named image is present in store; if never, only use the image present in store if available")
+	fs.Lookup("pull").NoOptDefVal = "true" // allow a bare `--pull` (no value) to mean `--pull=true`, as expected
fs.BoolVar(&flags.PullAlways, "pull-always", false, "pull the image even if the named image is present in store")
+ if err := fs.MarkHidden("pull-always"); err != nil {
+ panic(fmt.Sprintf("error marking the pull-always flag as hidden: %v", err))
+ }
fs.BoolVar(&flags.PullNever, "pull-never", false, "do not pull the image, use the image present in store if available")
+ if err := fs.MarkHidden("pull-never"); err != nil {
+ panic(fmt.Sprintf("error marking the pull-never flag as hidden: %v", err))
+ }
fs.BoolVarP(&flags.Quiet, "quiet", "q", false, "refrain from announcing build instructions and image read/write progress")
fs.BoolVar(&flags.Rm, "rm", true, "Remove intermediate containers after a successful build")
// "runtime" definition moved to avoid name collision in podman build. Defined in cmd/buildah/build.go.
@@ -259,6 +266,7 @@ func GetBudFlagsCompletions() commonComp.FlagCompletions {
flagCompletion["logfile"] = commonComp.AutocompleteDefault
flagCompletion["manifest"] = commonComp.AutocompleteDefault
flagCompletion["os"] = commonComp.AutocompleteNone
+ flagCompletion["pull"] = commonComp.AutocompleteDefault
flagCompletion["runtime-flag"] = commonComp.AutocompleteNone
flagCompletion["secret"] = commonComp.AutocompleteNone
flagCompletion["ssh"] = commonComp.AutocompleteNone
diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
index e292fe353..fcec3d079 100644
--- a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
+++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
@@ -18,6 +18,40 @@ import (
"golang.org/x/sys/unix"
)
+// Options holds various configuration options for an overlay mount.
+// MountWithOptions accepts this type so that it is easier to specify
+// a more verbose configuration for the overlay mount.
+type Options struct {
+	// The Upper directory is normally the writable layer in an overlay mount.
+	// Note: this API does not handle escaping or validate the correctness of the values
+	// passed in UpperDirOptionFragment; instead it passes the values as-is
+	// to the `mount` command. It is the user's responsibility to pre-validate
+	// these values. Invalid inputs may lead to undefined behaviour.
+	// This is provided as-is; use it if it works for you, we can/will change/break that in the future.
+	// See discussion here for more context: https://github.com/containers/buildah/pull/3715#discussion_r786036959
+	// TODO: Should we address the above comment and handle escaping of metacharacters like
+	// `comma`, `backslash`, `colon` and any other special characters?
+	UpperDirOptionFragment string
+	// The Workdir is used to prepare files as they are switched between the layers.
+	// Note: this API does not handle escaping or validate the correctness of the values
+	// passed in WorkDirOptionFragment; instead it passes the values as-is
+	// to the `mount` command. It is the user's responsibility to pre-validate
+	// these values. Invalid inputs may lead to undefined behaviour.
+	// This is provided as-is; use it if it works for you, we can/will change/break that in the future.
+	// See discussion here for more context: https://github.com/containers/buildah/pull/3715#discussion_r786036959
+	// TODO: Should we address the above comment and handle escaping of metacharacters like
+	// `comma`, `backslash`, `colon` and any other special characters?
+	WorkDirOptionFragment string
+	// Graph options relayed from podman; used to decide which mount program to run.
+	GraphOpts []string
+	// Marks whether the overlay is mounted read-only.
+	ReadOnly bool
+	// RootUID is not used yet; kept here for legacy reasons.
+	RootUID int
+	// RootGID is not used yet; kept here for legacy reasons.
+	RootGID int
+}
+
// TempDir generates an overlay Temp directory in the container content
func TempDir(containerDir string, rootUID, rootGID int) (string, error) {
contentDir := filepath.Join(containerDir, "overlay")
@@ -65,7 +99,8 @@ func generateOverlayStructure(containerDir string, rootUID, rootGID int) (string
// from the source system. It then mounts up the source directory on to the
// generated mount point and returns the mount point to the caller.
func Mount(contentDir, source, dest string, rootUID, rootGID int, graphOptions []string) (mount specs.Mount, Err error) {
- return mountHelper(contentDir, source, dest, rootUID, rootGID, graphOptions, false)
+ overlayOpts := Options{GraphOpts: graphOptions, ReadOnly: false, RootUID: rootUID, RootGID: rootGID}
+ return MountWithOptions(contentDir, source, dest, &overlayOpts)
}
// MountReadOnly creates a subdir of the contentDir based on the source directory
@@ -73,16 +108,21 @@ func Mount(contentDir, source, dest string, rootUID, rootGID int, graphOptions [
// generated mount point and returns the mount point to the caller. Note that no
// upper layer will be created rendering it a read-only mount
func MountReadOnly(contentDir, source, dest string, rootUID, rootGID int, graphOptions []string) (mount specs.Mount, Err error) {
- return mountHelper(contentDir, source, dest, rootUID, rootGID, graphOptions, true)
+ overlayOpts := Options{GraphOpts: graphOptions, ReadOnly: true, RootUID: rootUID, RootGID: rootGID}
+ return MountWithOptions(contentDir, source, dest, &overlayOpts)
}
-// NOTE: rootUID and rootUID are not yet used.
-func mountHelper(contentDir, source, dest string, _, _ int, graphOptions []string, readOnly bool) (mount specs.Mount, Err error) {
+// MountWithOptions creates a subdir of the contentDir based on the source directory
+// from the source system. It then mounts up the source directory on to the
+// generated mount point and returns the mount point to the caller.
+// Unlike Mount, it allows the caller to set a custom workdir, upperdir and other overlay options.
+// This API is currently used by podman.
+func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) {
mergeDir := filepath.Join(contentDir, "merge")
// Create overlay mount options for rw/ro.
var overlayOptions string
- if readOnly {
+ if opts.ReadOnly {
 		// Read-only overlay mounts require two lower layers.
lowerTwo := filepath.Join(contentDir, "lower")
if err := os.Mkdir(lowerTwo, 0755); err != nil {
@@ -93,6 +133,12 @@ func mountHelper(contentDir, source, dest string, _, _ int, graphOptions []strin
// Read-write overlay mounts want a lower, upper and a work layer.
workDir := filepath.Join(contentDir, "work")
upperDir := filepath.Join(contentDir, "upper")
+
+ if opts.WorkDirOptionFragment != "" && opts.UpperDirOptionFragment != "" {
+ workDir = opts.WorkDirOptionFragment
+ upperDir = opts.UpperDirOptionFragment
+ }
+
st, err := os.Stat(source)
if err != nil {
return mount, err
@@ -117,7 +163,7 @@ func mountHelper(contentDir, source, dest string, _, _ int, graphOptions []strin
"overlay2.mount_program": true,
}
- for _, i := range graphOptions {
+ for _, i := range opts.GraphOpts {
s := strings.SplitN(i, "=", 2)
if len(s) != 2 {
continue
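
The new exported MountWithOptions replaces the private mountHelper and lets callers such as podman supply their own upperdir/workdir fragments and graph options. A hedged usage sketch based only on the signatures shown above; the paths and the fuse-overlayfs graph option are hypothetical placeholders, and, as the Options comments warn, the fragments are handed to mount without escaping or validation:

    package main

    import (
        "fmt"

        "github.com/containers/buildah/pkg/overlay"
    )

    func main() {
        // Hypothetical container userdata directory; TempDir creates the
        // overlay scaffolding underneath it and returns the content dir.
        contentDir, err := overlay.TempDir("/var/lib/containers/storage/overlay-containers/demo/userdata", 0, 0)
        if err != nil {
            panic(err)
        }

        opts := overlay.Options{
            // Caller-chosen upperdir/workdir; passed through unvalidated.
            UpperDirOptionFragment: "/srv/build/upper",
            WorkDirOptionFragment:  "/srv/build/work",
            GraphOpts:              []string{"overlay.mount_program=/usr/bin/fuse-overlayfs"},
            ReadOnly:               false,
        }

        mnt, err := overlay.MountWithOptions(contentDir, "/srv/build/context", "/mnt/target", &opts)
        if err != nil {
            panic(err)
        }
        fmt.Printf("type=%s source=%s options=%v\n", mnt.Type, mnt.Source, mnt.Options)
    }
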
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
index fb1aea2ee..b57b36a62 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -441,20 +441,26 @@ func validateIPAddress(val string) (string, error) {
// SystemContextFromOptions returns a SystemContext populated with values
// per the input parameters provided by the caller for the use in authentication.
func SystemContextFromOptions(c *cobra.Command) (*types.SystemContext, error) {
- certDir, err := c.Flags().GetString("cert-dir")
+ return SystemContextFromFlagSet(c.Flags(), c.Flag)
+}
+
+// SystemContextFromFlagSet returns a SystemContext populated with values
+// per the input parameters provided by the caller for use in authentication.
+func SystemContextFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (*types.SystemContext, error) {
+ certDir, err := flags.GetString("cert-dir")
if err != nil {
certDir = ""
}
ctx := &types.SystemContext{
DockerCertPath: certDir,
}
- tlsVerify, err := c.Flags().GetBool("tls-verify")
- if err == nil && c.Flag("tls-verify").Changed {
+ tlsVerify, err := flags.GetBool("tls-verify")
+ if err == nil && findFlagFunc("tls-verify").Changed {
ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!tlsVerify)
ctx.OCIInsecureSkipTLSVerify = !tlsVerify
ctx.DockerDaemonInsecureSkipTLSVerify = !tlsVerify
}
- disableCompression, err := c.Flags().GetBool("disable-compression")
+ disableCompression, err := flags.GetBool("disable-compression")
if err == nil {
if disableCompression {
ctx.OCIAcceptUncompressedLayers = true
@@ -462,59 +468,59 @@ func SystemContextFromOptions(c *cobra.Command) (*types.SystemContext, error) {
ctx.DirForceCompress = true
}
}
- creds, err := c.Flags().GetString("creds")
- if err == nil && c.Flag("creds").Changed {
+ creds, err := flags.GetString("creds")
+ if err == nil && findFlagFunc("creds").Changed {
var err error
ctx.DockerAuthConfig, err = AuthConfig(creds)
if err != nil {
return nil, err
}
}
- sigPolicy, err := c.Flags().GetString("signature-policy")
- if err == nil && c.Flag("signature-policy").Changed {
+ sigPolicy, err := flags.GetString("signature-policy")
+ if err == nil && findFlagFunc("signature-policy").Changed {
ctx.SignaturePolicyPath = sigPolicy
}
- authfile, err := c.Flags().GetString("authfile")
+ authfile, err := flags.GetString("authfile")
if err == nil {
ctx.AuthFilePath = getAuthFile(authfile)
}
- regConf, err := c.Flags().GetString("registries-conf")
- if err == nil && c.Flag("registries-conf").Changed {
+ regConf, err := flags.GetString("registries-conf")
+ if err == nil && findFlagFunc("registries-conf").Changed {
ctx.SystemRegistriesConfPath = regConf
}
- regConfDir, err := c.Flags().GetString("registries-conf-dir")
- if err == nil && c.Flag("registries-conf-dir").Changed {
+ regConfDir, err := flags.GetString("registries-conf-dir")
+ if err == nil && findFlagFunc("registries-conf-dir").Changed {
ctx.RegistriesDirPath = regConfDir
}
- shortNameAliasConf, err := c.Flags().GetString("short-name-alias-conf")
- if err == nil && c.Flag("short-name-alias-conf").Changed {
+ shortNameAliasConf, err := flags.GetString("short-name-alias-conf")
+ if err == nil && findFlagFunc("short-name-alias-conf").Changed {
ctx.UserShortNameAliasConfPath = shortNameAliasConf
}
ctx.DockerRegistryUserAgent = fmt.Sprintf("Buildah/%s", define.Version)
- if c.Flag("os") != nil && c.Flag("os").Changed {
+ if findFlagFunc("os") != nil && findFlagFunc("os").Changed {
var os string
- if os, err = c.Flags().GetString("os"); err != nil {
+ if os, err = flags.GetString("os"); err != nil {
return nil, err
}
ctx.OSChoice = os
}
- if c.Flag("arch") != nil && c.Flag("arch").Changed {
+ if findFlagFunc("arch") != nil && findFlagFunc("arch").Changed {
var arch string
- if arch, err = c.Flags().GetString("arch"); err != nil {
+ if arch, err = flags.GetString("arch"); err != nil {
return nil, err
}
ctx.ArchitectureChoice = arch
}
- if c.Flag("variant") != nil && c.Flag("variant").Changed {
+ if findFlagFunc("variant") != nil && findFlagFunc("variant").Changed {
var variant string
- if variant, err = c.Flags().GetString("variant"); err != nil {
+ if variant, err = flags.GetString("variant"); err != nil {
return nil, err
}
ctx.VariantChoice = variant
}
- if c.Flag("platform") != nil && c.Flag("platform").Changed {
+ if findFlagFunc("platform") != nil && findFlagFunc("platform").Changed {
var specs []string
- if specs, err = c.Flags().GetStringSlice("platform"); err != nil {
+ if specs, err = flags.GetStringSlice("platform"); err != nil {
return nil, err
}
if len(specs) == 0 || specs[0] == "" {
@@ -664,8 +670,13 @@ func AuthConfig(creds string) (*types.DockerAuthConfig, error) {
// IDMappingOptions parses the build options related to user namespaces and ID mapping.
func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) {
- user := c.Flag("userns-uid-map-user").Value.String()
- group := c.Flag("userns-gid-map-group").Value.String()
+ return IDMappingOptionsFromFlagSet(c.Flags(), c.PersistentFlags(), c.Flag)
+}
+
+// IDMappingOptionsFromFlagSet parses the build options related to user namespaces and ID mapping.
+func IDMappingOptionsFromFlagSet(flags *pflag.FlagSet, persistentFlags *pflag.FlagSet, findFlagFunc func(name string) *pflag.Flag) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) {
+ user := findFlagFunc("userns-uid-map-user").Value.String()
+ group := findFlagFunc("userns-gid-map-group").Value.String()
// If only the user or group was specified, use the same value for the
// other, since we need both in order to initialize the maps using the
// names.
@@ -684,7 +695,7 @@ func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptio
}
mappings = submappings
}
- globalOptions := c.PersistentFlags()
+ globalOptions := persistentFlags
// We'll parse the UID and GID mapping options the same way.
buildIDMap := func(basemap []idtools.IDMap, option string) ([]specs.LinuxIDMapping, error) {
outmap := make([]specs.LinuxIDMapping, 0, len(basemap))
@@ -702,8 +713,8 @@ func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptio
if globalOptions.Lookup(option) != nil && globalOptions.Lookup(option).Changed {
spec, _ = globalOptions.GetStringSlice(option)
}
- if c.Flag(option).Changed {
- spec, _ = c.Flags().GetStringSlice(option)
+ if findFlagFunc(option).Changed {
+ spec, _ = flags.GetStringSlice(option)
}
idmap, err := parseIDMap(spec)
if err != nil {
@@ -744,8 +755,8 @@ func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptio
}
// If the user specifically requested that we either use or don't use
// user namespaces, override that default.
- if c.Flag("userns").Changed {
- how := c.Flag("userns").Value.String()
+ if findFlagFunc("userns").Changed {
+ how := findFlagFunc("userns").Value.String()
switch how {
case "", "container", "private":
usernsOption.Host = false
@@ -814,8 +825,6 @@ func NamespaceOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name st
if flags.Lookup(what) != nil && findFlagFunc(what).Changed {
how := findFlagFunc(what).Value.String()
switch what {
- case "network":
- what = string(specs.NetworkNamespace)
case "cgroupns":
what = string(specs.CgroupNamespace)
}
@@ -845,8 +854,11 @@ func NamespaceOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name st
}
}
how = strings.TrimPrefix(how, "ns:")
- if _, err := os.Stat(how); err != nil {
- return nil, define.NetworkDefault, errors.Wrapf(err, "checking %s namespace", what)
+ // if not an absolute path, we assume it is a comma-separated network list; see setupNamespaces() in run_linux.go
+ if filepath.IsAbs(how) || what != string(specs.NetworkNamespace) {
+ if _, err := os.Stat(how); err != nil {
+ return nil, define.NetworkDefault, errors.Wrapf(err, "checking %s namespace", what)
+ }
}
policy = define.NetworkEnabled
logrus.Debugf("setting %q namespace to %q", what, how)
diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go
index 0ca720470..a9825eb05 100644
--- a/vendor/github.com/containers/buildah/run_linux.go
+++ b/vendor/github.com/containers/buildah/run_linux.go
@@ -165,6 +165,11 @@ func (b *Builder) Run(command []string, options RunOptions) error {
return err
}
+ // configuring networks is not supported with rootless isolation
+ if len(configureNetworks) > 0 && isolation == IsolationOCIRootless {
+ return errors.New("cannot use networks as rootless")
+ }
+
homeDir, err := b.configureUIDGID(g, mountPoint, options)
if err != nil {
return err
@@ -800,11 +805,10 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs [
runtime := options.Runtime
if runtime == "" {
runtime = util.Runtime()
-
- localRuntime := util.FindLocalRuntime(runtime)
- if localRuntime != "" {
- runtime = localRuntime
- }
+ }
+ localRuntime := util.FindLocalRuntime(runtime)
+ if localRuntime != "" {
+ runtime = localRuntime
}
// Default to just passing down our stdio.
@@ -1687,7 +1691,7 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions
namespaceOptions.AddOrReplace(options.NamespaceOptions...)
networkPolicy := options.ConfigureNetwork
- //Nothing was specified explictily so network policy should be inherited from builder
+ //Nothing was specified explicitly so network policy should be inherited from builder
if networkPolicy == NetworkDefault {
networkPolicy = b.ConfigureNetwork
@@ -1788,7 +1792,7 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) {
var foundrw, foundro, foundz, foundZ, foundO, foundU bool
- var rootProp string
+ var rootProp, upperDir, workDir string
for _, opt := range options {
switch opt {
case "rw":
@@ -1806,6 +1810,19 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
case "private", "rprivate", "slave", "rslave", "shared", "rshared":
rootProp = opt
}
+
+ if strings.HasPrefix(opt, "upperdir") {
+ splitOpt := strings.SplitN(opt, "=", 2)
+ if len(splitOpt) > 1 {
+ upperDir = splitOpt[1]
+ }
+ }
+ if strings.HasPrefix(opt, "workdir") {
+ splitOpt := strings.SplitN(opt, "=", 2)
+ if len(splitOpt) > 1 {
+ workDir = splitOpt[1]
+ }
+ }
}
if !foundrw && !foundro {
options = append(options, "rw")
@@ -1826,6 +1843,10 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
}
}
if foundO {
+ if (upperDir != "" && workDir == "") || (workDir != "" && upperDir == "") {
+ return specs.Mount{}, errors.New("if specifying upperdir then workdir must be specified or vice versa")
+ }
+
containerDir, err := b.store.ContainerDirectory(b.ContainerID)
if err != nil {
return specs.Mount{}, err
@@ -1836,7 +1857,14 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
return specs.Mount{}, errors.Wrapf(err, "failed to create TempDir in the %s directory", containerDir)
}
- overlayMount, err := overlay.Mount(contentDir, host, container, rootUID, rootGID, b.store.GraphOptions())
+ overlayOpts := overlay.Options{RootUID: rootUID,
+ RootGID: rootGID,
+ UpperDirOptionFragment: upperDir,
+ WorkDirOptionFragment: workDir,
+ GraphOpts: b.store.GraphOptions(),
+ }
+
+ overlayMount, err := overlay.MountWithOptions(contentDir, host, container, &overlayOpts)
if err == nil {
b.TempVolumes[contentDir] = true
}
@@ -2321,8 +2349,7 @@ func checkAndOverrideIsolationOptions(isolation define.Isolation, options *RunOp
if ns := options.NamespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil {
hostNetworking = ns.Host
networkNamespacePath = ns.Path
- if !hostNetworking && networkNamespacePath != "" && !filepath.IsAbs(networkNamespacePath) {
- logrus.Debugf("Disabling network namespace configuration.")
+ if hostNetworking {
networkNamespacePath = ""
}
}
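The run_linux.go changes above pull optional upperdir= and workdir= fragments out of the overlay volume options and pass them to overlay.MountWithOptions through the new overlay.Options fields. A small standalone sketch of the parsing and the both-or-neither check follows; extractOverlayDirs is a made-up helper name and the sketch uses only the standard library.

    package main

    import (
    	"errors"
    	"fmt"
    	"strings"
    )

    // extractOverlayDirs pulls upperdir=/workdir= values out of a volume option list
    // and insists that they are either both present or both absent.
    func extractOverlayDirs(options []string) (upperDir, workDir string, err error) {
    	for _, opt := range options {
    		if strings.HasPrefix(opt, "upperdir=") {
    			upperDir = strings.SplitN(opt, "=", 2)[1]
    		}
    		if strings.HasPrefix(opt, "workdir=") {
    			workDir = strings.SplitN(opt, "=", 2)[1]
    		}
    	}
    	if (upperDir == "") != (workDir == "") {
    		return "", "", errors.New("upperdir and workdir must be specified together")
    	}
    	return upperDir, workDir, nil
    }

    func main() {
    	u, w, err := extractOverlayDirs([]string{"O", "upperdir=/tmp/upper", "workdir=/tmp/work"})
    	fmt.Println(u, w, err) // /tmp/upper /tmp/work <nil>
    }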
diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go
index 47c9ac5cd..9bfa9d268 100644
--- a/vendor/github.com/containers/buildah/util.go
+++ b/vendor/github.com/containers/buildah/util.go
@@ -123,8 +123,8 @@ func isRegistryBlocked(registry string, sc *types.SystemContext) (bool, error) {
// isReferenceSomething checks if the registry part of a reference is insecure or blocked
func isReferenceSomething(ref types.ImageReference, sc *types.SystemContext, what func(string, *types.SystemContext) (bool, error)) (bool, error) {
- if ref != nil && ref.DockerReference() != nil {
- if named, ok := ref.DockerReference().(reference.Named); ok {
+ if ref != nil {
+ if named := ref.DockerReference(); named != nil {
if domain := reference.Domain(named); domain != "" {
return what(domain, sc)
}
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index 7024a821f..13c602c00 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -17,7 +17,6 @@ import (
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/pkg/shortnames"
- "github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/types"
@@ -48,9 +47,8 @@ var (
// resolveName checks if name is a valid image name, and if that name doesn't
// include a domain portion, returns a list of the names which it might
-// correspond to in the set of configured registries, the transport used to
-// pull the image, and a boolean which is true iff
-// 1) the list of search registries was used, and 2) it was empty.
+// correspond to in the set of configured registries, and the transport used to
+// pull the image.
//
// The returned image names never include a transport: prefix, and if transport != "",
// (transport, image) should be a valid input to alltransports.ParseImageName.
@@ -59,9 +57,9 @@ var (
//
// NOTE: The "list of search registries is empty" check does not count blocked registries,
// and neither the implied "localhost" nor a possible firstRegistry are counted
-func resolveName(name string, sc *types.SystemContext, store storage.Store) ([]string, string, bool, error) {
+func resolveName(name string, sc *types.SystemContext, store storage.Store) ([]string, string, error) {
if name == "" {
- return nil, "", false, nil
+ return nil, "", nil
}
// Maybe it's a truncated image ID. Don't prepend a registry name, then.
@@ -69,7 +67,7 @@ func resolveName(name string, sc *types.SystemContext, store storage.Store) ([]s
if img, err := store.Image(name); err == nil && img != nil && strings.HasPrefix(img.ID, name) {
// It's a truncated version of the ID of an image that's present in local storage;
// we need only expand the ID.
- return []string{img.ID}, "", false, nil
+ return []string{img.ID}, "", nil
}
}
// If we're referring to an image by digest, it *must* be local and we
@@ -77,51 +75,32 @@ func resolveName(name string, sc *types.SystemContext, store storage.Store) ([]s
if strings.HasPrefix(name, "sha256:") {
d, err := digest.Parse(name)
if err != nil {
- return nil, "", false, err
+ return nil, "", err
}
img, err := store.Image(d.Encoded())
if err != nil {
- return nil, "", false, err
+ return nil, "", err
}
- return []string{img.ID}, "", false, nil
+ return []string{img.ID}, "", nil
}
// Transports are not supported for local image look ups.
srcRef, err := alltransports.ParseImageName(name)
if err == nil {
- return []string{srcRef.StringWithinTransport()}, srcRef.Transport().Name(), false, nil
+ return []string{srcRef.StringWithinTransport()}, srcRef.Transport().Name(), nil
}
- // Figure out the list of registries.
- var registries []string
- searchRegistries, err := sysregistriesv2.UnqualifiedSearchRegistries(sc)
- if err != nil {
- logrus.Debugf("unable to read configured registries to complete %q: %v", name, err)
- searchRegistries = nil
- }
- for _, registry := range searchRegistries {
- reg, err := sysregistriesv2.FindRegistry(sc, registry)
- if err != nil {
- logrus.Debugf("unable to read registry configuration for %#v: %v", registry, err)
- continue
- }
- if reg == nil || !reg.Blocked {
- registries = append(registries, registry)
- }
- }
- searchRegistriesAreEmpty := len(registries) == 0
-
var candidates []string
// Local short-name resolution.
namedCandidates, err := shortnames.ResolveLocally(sc, name)
if err != nil {
- return nil, "", false, err
+ return nil, "", err
}
for _, named := range namedCandidates {
candidates = append(candidates, named.String())
}
- return candidates, DefaultTransport, searchRegistriesAreEmpty, nil
+ return candidates, DefaultTransport, nil
}
// ExpandNames takes unqualified names, parses them as image names, and returns
@@ -132,7 +111,7 @@ func ExpandNames(names []string, systemContext *types.SystemContext, store stora
expanded := make([]string, 0, len(names))
for _, n := range names {
var name reference.Named
- nameList, _, _, err := resolveName(n, systemContext, store)
+ nameList, _, err := resolveName(n, systemContext, store)
if err != nil {
return nil, errors.Wrapf(err, "error parsing name %q", n)
}
@@ -183,7 +162,7 @@ func ResolveNameToReferences(
systemContext *types.SystemContext,
image string,
) (refs []types.ImageReference, err error) {
- names, transport, _, err := resolveName(image, systemContext, store)
+ names, transport, err := resolveName(image, systemContext, store)
if err != nil {
return nil, errors.Wrapf(err, "error parsing name %q", image)
}
diff --git a/vendor/github.com/containers/common/libnetwork/cni/config.go b/vendor/github.com/containers/common/libnetwork/cni/config.go
index e34daa3c5..b0aa19d94 100644
--- a/vendor/github.com/containers/common/libnetwork/cni/config.go
+++ b/vendor/github.com/containers/common/libnetwork/cni/config.go
@@ -60,7 +60,7 @@ func (n *cniNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) (
// Therefore the next podman command tries to create the default net again and it would
// fail because it thinks the network is used on the host.
var usedNetworks []*net.IPNet
- if !defaultNet {
+ if !defaultNet && newNetwork.Driver == types.BridgeNetworkDriver {
usedNetworks, err = internalutil.GetUsedSubnets(n)
if err != nil {
return nil, err
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/config.go b/vendor/github.com/containers/common/libnetwork/netavark/config.go
index 6d2daf299..7de59f807 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/config.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/config.go
@@ -74,7 +74,7 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
// Therefore the next podman command tries to create the default net again and it would
// fail because it thinks the network is used on the host.
var usedNetworks []*net.IPNet
- if !defaultNet {
+ if !defaultNet && newNetwork.Driver == types.BridgeNetworkDriver {
usedNetworks, err = internalutil.GetUsedSubnets(n)
if err != nil {
return nil, err
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/exec.go b/vendor/github.com/containers/common/libnetwork/netavark/exec.go
index 69466a423..1812b9084 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/exec.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/exec.go
@@ -107,7 +107,7 @@ func (n *netavarkNetwork) execNetavark(args []string, stdin, result interface{})
logWriter = io.MultiWriter(logWriter, &logrusNetavarkWriter{})
}
- cmd := exec.Command(n.netavarkBinary, args...)
+ cmd := exec.Command(n.netavarkBinary, append(n.getCommonNetavarkOptions(), args...)...)
// connect the pipes to stdin and stdout
cmd.Stdin = stdinR
cmd.Stdout = stdoutW
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/network.go b/vendor/github.com/containers/common/libnetwork/netavark/network.go
index ce252bc1d..7122acf98 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/network.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/network.go
@@ -13,6 +13,7 @@ import (
"github.com/containers/common/libnetwork/internal/util"
"github.com/containers/common/libnetwork/types"
"github.com/containers/storage/pkg/lockfile"
+ "github.com/containers/storage/pkg/unshare"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -21,8 +22,16 @@ type netavarkNetwork struct {
// networkConfigDir is directory where the network config files are stored.
networkConfigDir string
+ // networkRunDir is where temporary files are stored, i.e. the ipam db, aardvark config, etc.
+ networkRunDir string
+
+ // networkRootless tells netavark whether this is rootless or rootful mode, passed as "true" or "false"
+ networkRootless bool
+
// netavarkBinary is the path to the netavark binary.
netavarkBinary string
+ // aardvarkBinary is the path to the aardvark binary.
+ aardvarkBinary string
// defaultNetwork is the name for the default network.
defaultNetwork string
@@ -52,8 +61,10 @@ type InitConfig struct {
// NetavarkBinary is the path to the netavark binary.
NetavarkBinary string
+ // AardvarkBinary is the path to the aardvark binary.
+ AardvarkBinary string
- // NetworkRunDir is where temporary files are stored, i.e.the ipam db.
+ // NetworkRunDir is where temporary files are stored, i.e. the ipam db, aardvark config
NetworkRunDir string
// DefaultNetwork is the name for the default network.
@@ -99,7 +110,10 @@ func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) {
n := &netavarkNetwork{
networkConfigDir: conf.NetworkConfigDir,
+ networkRunDir: conf.NetworkRunDir,
netavarkBinary: conf.NetavarkBinary,
+ aardvarkBinary: conf.AardvarkBinary,
+ networkRootless: unshare.IsRootless(),
ipamDBPath: filepath.Join(conf.NetworkRunDir, "ipam.db"),
defaultNetwork: defaultNetworkName,
defaultSubnet: defaultNet,
diff --git a/vendor/github.com/containers/common/libnetwork/netavark/run.go b/vendor/github.com/containers/common/libnetwork/netavark/run.go
index c6f2007e2..0a9dc3704 100644
--- a/vendor/github.com/containers/common/libnetwork/netavark/run.go
+++ b/vendor/github.com/containers/common/libnetwork/netavark/run.go
@@ -5,6 +5,7 @@ package netavark
import (
"encoding/json"
"fmt"
+ "strconv"
"github.com/containers/common/libnetwork/internal/util"
"github.com/containers/common/libnetwork/types"
@@ -110,6 +111,10 @@ func (n *netavarkNetwork) Teardown(namespacePath string, options types.TeardownO
return retErr
}
+func (n *netavarkNetwork) getCommonNetavarkOptions() []string {
+ return []string{"--config", n.networkRunDir, "--rootless=" + strconv.FormatBool(n.networkRootless), "--aardvark-binary=" + n.aardvarkBinary}
+}
+
func (n *netavarkNetwork) convertNetOpts(opts types.NetworkOptions) (*netavarkOptions, error) {
netavarkOptions := netavarkOptions{
NetworkOptions: opts,
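With the exec.go and run.go changes above, every netavark invocation is prefixed with the common options (--config, --rootless, --aardvark-binary) before the per-command arguments. A short sketch of how that command line is assembled; the binary paths and namespace path below are placeholder values, not defaults from this module.

    package main

    import (
    	"fmt"
    	"os/exec"
    	"strconv"
    )

    func main() {
    	// Placeholder values; the real ones come from the netavarkNetwork fields added above.
    	netavarkBinary := "/usr/libexec/podman/netavark"
    	aardvarkBinary := "/usr/libexec/podman/aardvark-dns"
    	networkRunDir := "/run/containers/networks"
    	rootless := false

    	common := []string{
    		"--config", networkRunDir,
    		"--rootless=" + strconv.FormatBool(rootless),
    		"--aardvark-binary=" + aardvarkBinary,
    	}
    	args := []string{"setup", "/proc/1234/ns/net"} // per-command args, appended after the common ones

    	cmd := exec.Command(netavarkBinary, append(common, args...)...)
    	fmt.Println(cmd.Args) // inspect the final command line instead of running it
    }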
diff --git a/vendor/github.com/containers/common/libnetwork/network/interface.go b/vendor/github.com/containers/common/libnetwork/network/interface.go
index 37a910a24..cd4fd89f1 100644
--- a/vendor/github.com/containers/common/libnetwork/network/interface.go
+++ b/vendor/github.com/containers/common/libnetwork/network/interface.go
@@ -31,6 +31,11 @@ const (
netavarkConfigDir = "/etc/containers/networks"
// netavarkRunDir is the run directory for the rootful temporary network files such as the ipam db
netavarkRunDir = "/run/containers/networks"
+
+ // netavarkBinary is the name of the netavark binary
+ netavarkBinary = "netavark"
+ // aardvarkBinary is the name of the aardvark binary
+ aardvarkBinary = "aardvark-dns"
)
// NetworkBackend returns the network backend name and interface
@@ -51,11 +56,17 @@ func NetworkBackend(store storage.Store, conf *config.Config, syslog bool) (type
switch backend {
case types.Netavark:
- netavarkBin, err := conf.FindHelperBinary("netavark", false)
+ netavarkBin, err := conf.FindHelperBinary(netavarkBinary, false)
if err != nil {
return "", nil, err
}
+ aardvarkBin, err := conf.FindHelperBinary(aardvarkBinary, false)
+ if err != nil {
+ // this is not a fatal error, we can still use netavark without dns
+ logrus.Warnf("%s binary not found, container dns will not be enabled", aardvarkBinary)
+ }
+
confDir := conf.Network.NetworkConfigDir
if confDir == "" {
confDir = getDefaultNetavarkConfigDir(store)
@@ -74,6 +85,7 @@ func NetworkBackend(store storage.Store, conf *config.Config, syslog bool) (type
NetworkConfigDir: confDir,
NetworkRunDir: runDir,
NetavarkBinary: netavarkBin,
+ AardvarkBinary: aardvarkBin,
DefaultNetwork: conf.Network.DefaultNetwork,
DefaultSubnet: conf.Network.DefaultSubnet,
Syslog: syslog,
diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go
index 6837a378a..dd30abcd6 100644
--- a/vendor/github.com/containers/common/pkg/config/config.go
+++ b/vendor/github.com/containers/common/pkg/config/config.go
@@ -72,6 +72,8 @@ type Config struct {
Network NetworkConfig `toml:"network"`
// Secret section defines configurations for the secret management
Secrets SecretConfig `toml:"secrets"`
+ // ConfigMaps section defines configurations for configmap management
+ ConfigMaps ConfigMapConfig `toml:"configmaps"`
}
// ContainersConfig represents the "containers" TOML config table
@@ -179,11 +181,6 @@ type ContainersConfig struct {
// performance implications.
PrepareVolumeOnCreate bool `toml:"prepare_volume_on_create,omitempty"`
- // RootlessNetworking depicts the "kind" of networking for rootless
- // containers. Valid options are `slirp4netns` and `cni`. Default is
- // `slirp4netns` on Linux, and `cni` on non-Linux OSes.
- RootlessNetworking string `toml:"rootless_networking,omitempty"`
-
// SeccompProfile is the seccomp.json profile path which is used as the
// default for the runtime.
SeccompProfile string `toml:"seccomp_profile,omitempty"`
@@ -514,6 +511,17 @@ type SecretConfig struct {
Opts map[string]string `toml:"opts,omitempty"`
}
+// ConfigMapConfig represents the "configmaps" TOML config table
+type ConfigMapConfig struct {
+ // Driver specifies the configmap driver to use.
+ // Current valid values:
+ // * file
+ // * pass
+ Driver string `toml:"driver,omitempty"`
+ // Opts contains driver specific options
+ Opts map[string]string `toml:"opts,omitempty"`
+}
+
// MachineConfig represents the "machine" TOML config table
type MachineConfig struct {
// Number of CPU's a machine is created with.
@@ -1180,7 +1188,7 @@ func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error)
return "", errors.Errorf("could not find %q in one of %v. %s", name, c.Engine.HelperBinariesDir, configHint)
}
-// ImageCopyTmpDir default directory to store tempory image files during copy
+// ImageCopyTmpDir default directory to store temporary image files during copy
func (c *Config) ImageCopyTmpDir() (string, error) {
if path, found := os.LookupEnv("TMPDIR"); found {
return path, nil
diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf
index b1e6f5435..f497d2bbe 100644
--- a/vendor/github.com/containers/common/pkg/config/containers.conf
+++ b/vendor/github.com/containers/common/pkg/config/containers.conf
@@ -197,10 +197,6 @@ default_sysctls = [
#
#prepare_volume_on_create = false
-# Indicates the networking to be used for rootless containers
-#
-#rootless_networking = "slirp4netns"
-
# Path to the seccomp.json profile which is used as the default seccomp profile
# for the runtime.
#
@@ -403,14 +399,16 @@ default_sysctls = [
# Infra (pause) container image name for pod infra containers. When running a
# pod, we start a `pause` process in a container to hold open the namespaces
# associated with the pod. This container does nothing other than sleep,
-# reserving the pods resources for the lifetime of the pod.
+# reserving the pod's resources for the lifetime of the pod. By default container
+# engines run a built-in container using the pause executable. If you want to
+# override this, specify an image to pull.
#
-#infra_image = "k8s.gcr.io/pause:3.4.1"
+#infra_image = ""
# Specify the locking mechanism to use; valid values are "shm" and "file".
# Change the default only if you are sure of what you are doing, in general
# "file" is useful only on platforms where cgo is not available for using the
-# faster "shm" lock type. You may need to run "podman system renumber" after
+# faster "shm" lock type. You may need to run "podman system renumber" after
# you change the lock type.
#
#lock_type** = "shm"
@@ -469,7 +467,7 @@ default_sysctls = [
#
#runtime = "crun"
-# List of the OCI runtimes that support --format=json. When json is supported
+# List of the OCI runtimes that support --format=json. When json is supported
# engine will use it for reporting nicer errors.
#
#runtime_supports_json = ["crun", "runc", "kata", "runsc", "krun"]
@@ -482,8 +480,8 @@ default_sysctls = [
#
#runtime_supports_nocgroups = ["crun", "krun"]
-# Default location for storing temporary container image content. Can be overridden with the TMPDIR environment
-# variable. If you specify "storage", then the location of the
+# Default location for storing temporary container image content. Can be overridden with the TMPDIR environment
+# variable. If you specify "storage", then the location of the
# container/storage tmp directory will be used.
# image_copy_tmp_dir="/var/tmp"
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go
index 55e4e4b67..279119749 100644
--- a/vendor/github.com/containers/common/pkg/config/default.go
+++ b/vendor/github.com/containers/common/pkg/config/default.go
@@ -46,7 +46,7 @@ var (
// DefaultInitPath is the default path to the container-init binary
DefaultInitPath = "/usr/libexec/podman/catatonit"
// DefaultInfraImage to use for infra container
- DefaultInfraImage = "k8s.gcr.io/pause:3.5"
+ DefaultInfraImage = ""
// DefaultRootlessSHMLockPath is the default path for rootless SHM locks
DefaultRootlessSHMLockPath = "/libpod_rootless_lock"
// DefaultDetachKeys is the default keys sequence for detaching a
@@ -177,23 +177,22 @@ func DefaultConfig() (*Config, error) {
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm",
},
- EnvHost: false,
- HTTPProxy: true,
- Init: false,
- InitPath: "",
- IPCNS: "private",
- LogDriver: defaultLogDriver(),
- LogSizeMax: DefaultLogSizeMax,
- NetNS: "private",
- NoHosts: false,
- PidsLimit: DefaultPidsLimit,
- PidNS: "private",
- RootlessNetworking: getDefaultRootlessNetwork(),
- ShmSize: DefaultShmSize,
- TZ: "",
- Umask: "0022",
- UTSNS: "private",
- UserNSSize: DefaultUserNSSize,
+ EnvHost: false,
+ HTTPProxy: true,
+ Init: false,
+ InitPath: "",
+ IPCNS: "private",
+ LogDriver: defaultLogDriver(),
+ LogSizeMax: DefaultLogSizeMax,
+ NetNS: "private",
+ NoHosts: false,
+ PidsLimit: DefaultPidsLimit,
+ PidNS: "private",
+ ShmSize: DefaultShmSize,
+ TZ: "",
+ Umask: "0022",
+ UTSNS: "private",
+ UserNSSize: DefaultUserNSSize,
},
Network: NetworkConfig{
DefaultNetwork: "podman",
@@ -566,9 +565,3 @@ func (c *Config) LogDriver() string {
func (c *Config) MachineEnabled() bool {
return c.Engine.MachineEnabled
}
-
-// RootlessNetworking returns the "kind" of networking
-// rootless containers should use
-func (c *Config) RootlessNetworking() string {
- return c.Containers.RootlessNetworking
-}
diff --git a/vendor/github.com/containers/common/pkg/config/default_linux.go b/vendor/github.com/containers/common/pkg/config/default_linux.go
index 9446d3ff9..cc2d0fe3e 100644
--- a/vendor/github.com/containers/common/pkg/config/default_linux.go
+++ b/vendor/github.com/containers/common/pkg/config/default_linux.go
@@ -24,12 +24,6 @@ func getDefaultMachineUser() string {
return "core"
}
-// getDefaultRootlessNetwork returns the default rootless network configuration.
-// It is "slirp4netns" for Linux.
-func getDefaultRootlessNetwork() string {
- return "slirp4netns"
-}
-
// getDefaultProcessLimits returns the nproc for the current process in ulimits format
// Note that nfile sometimes cannot be set to unlimited, and the limit is hardcoded
// to (oldMaxSize) 1048576 (2^20), see: http://stackoverflow.com/a/1213069/1811501
diff --git a/vendor/github.com/containers/common/pkg/config/default_unsupported.go b/vendor/github.com/containers/common/pkg/config/default_unsupported.go
index b6ee286ec..1aa7f6ef3 100644
--- a/vendor/github.com/containers/common/pkg/config/default_unsupported.go
+++ b/vendor/github.com/containers/common/pkg/config/default_unsupported.go
@@ -13,12 +13,6 @@ func getDefaultMachineUser() string {
return "core"
}
-// getDefaultRootlessNetwork returns the default rootless network configuration.
-// It is "cni" for non-Linux OSes (to better support `podman-machine` usecases).
-func getDefaultRootlessNetwork() string {
- return "cni"
-}
-
// isCgroup2UnifiedMode returns whether we are running in cgroup2 mode.
func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) {
return false, nil
diff --git a/vendor/github.com/containers/common/pkg/config/default_windows.go b/vendor/github.com/containers/common/pkg/config/default_windows.go
index 5f8dd1a28..28f102f1c 100644
--- a/vendor/github.com/containers/common/pkg/config/default_windows.go
+++ b/vendor/github.com/containers/common/pkg/config/default_windows.go
@@ -11,12 +11,6 @@ func getDefaultMachineUser() string {
return "user"
}
-// getDefaultRootlessNetwork returns the default rootless network configuration.
-// It is "cni" for non-Linux OSes (to better support `podman-machine` usecases).
-func getDefaultRootlessNetwork() string {
- return "cni"
-}
-
// isCgroup2UnifiedMode returns whether we are running in cgroup2 mode.
func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) {
return false, nil
diff --git a/vendor/github.com/containers/common/pkg/flag/flag.go b/vendor/github.com/containers/common/pkg/flag/flag.go
index 52eb50da0..7d6b6a534 100644
--- a/vendor/github.com/containers/common/pkg/flag/flag.go
+++ b/vendor/github.com/containers/common/pkg/flag/flag.go
@@ -95,7 +95,7 @@ func (os *OptionalString) Value() string {
// newoptionalString
type optionalStringValue OptionalString
-// NewOptionalStringValue returns a pflag.Value fo the string.
+// NewOptionalStringValue returns a pflag.Value for the string.
func NewOptionalStringValue(p *OptionalString) pflag.Value {
p.present = false
return (*optionalStringValue)(p)
diff --git a/vendor/github.com/containers/common/pkg/parse/parse.go b/vendor/github.com/containers/common/pkg/parse/parse.go
index fda129c83..5d826e805 100644
--- a/vendor/github.com/containers/common/pkg/parse/parse.go
+++ b/vendor/github.com/containers/common/pkg/parse/parse.go
@@ -14,9 +14,27 @@ import (
// ValidateVolumeOpts validates a volume's options
func ValidateVolumeOpts(options []string) ([]string, error) {
- var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown int
+ var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown, foundUpperDir, foundWorkDir int
finalOpts := make([]string, 0, len(options))
for _, opt := range options {
+ // support advanced options like upperdir=/path, workdir=/path
+ if strings.Contains(opt, "upperdir") {
+ foundUpperDir++
+ if foundUpperDir > 1 {
+ return nil, errors.Errorf("invalid options %q, can only specify 1 upperdir per overlay", strings.Join(options, ", "))
+ }
+ finalOpts = append(finalOpts, opt)
+ continue
+ }
+ if strings.Contains(opt, "workdir") {
+ foundWorkDir++
+ if foundWorkDir > 1 {
+ return nil, errors.Errorf("invalid options %q, can only specify 1 workdir per overlay", strings.Join(options, ", "))
+ }
+ finalOpts = append(finalOpts, opt)
+ continue
+ }
+
switch opt {
case "noexec", "exec":
foundExec++
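A brief usage sketch of the extended validation, assuming this vendored module is importable as github.com/containers/common/pkg/parse: exactly one upperdir and one workdir option are accepted per overlay volume, and duplicates are rejected. The paths below are placeholders.

    package main

    import (
    	"fmt"

    	"github.com/containers/common/pkg/parse"
    )

    func main() {
    	// A single upperdir/workdir pair passes validation.
    	opts, err := parse.ValidateVolumeOpts([]string{"ro", "upperdir=/tmp/upper", "workdir=/tmp/work"})
    	fmt.Println(opts, err)

    	// A second upperdir is rejected with an error.
    	_, err = parse.ValidateVolumeOpts([]string{"upperdir=/a", "upperdir=/b"})
    	fmt.Println(err)
    }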
diff --git a/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go b/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go
index 22aacb1ce..846bd5c17 100644
--- a/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go
+++ b/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go
@@ -36,7 +36,7 @@ type driverConfig struct {
LookupCommand string `mapstructure:"lookup"`
// StoreCommand contains a shell command that stores a secret.
// The secret id is provided as environment variable SECRET_ID
- // The secret value itself is provied over stdin
+ // The secret value itself is provided over stdin
StoreCommand string `mapstructure:"store"`
}
diff --git a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go
index 6c9321e73..3c0d2b237 100644
--- a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go
+++ b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go
@@ -149,14 +149,15 @@ func getMountsMap(path string) (string, string, error) { //nolint
// MountsWithUIDGID copies, adds, and mounts the subscriptions to the container root filesystem
// mountLabel: MAC/SELinux label for container content
-// containerWorkingDir: Private data for storing subscriptions on the host mounted in container.
+// containerRunDir: Private data for storing subscriptions on the host mounted in container.
// mountFile: Additional mount points required for the container.
-// mountPoint: Container image mountpoint
+// mountPoint: Container image mountpoint, or the directory from the host's perspective that
+// corresponds to `/` in the container.
// uid: to assign to content created for subscriptions
// gid: to assign to content created for subscriptions
// rootless: indicates whether container is running in rootless mode
// disableFips: indicates whether system should ignore fips mode
-func MountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPoint string, uid, gid int, rootless, disableFips bool) []rspec.Mount {
+func MountsWithUIDGID(mountLabel, containerRunDir, mountFile, mountPoint string, uid, gid int, rootless, disableFips bool) []rspec.Mount {
var (
subscriptionMounts []rspec.Mount
mountFiles []string
@@ -174,7 +175,7 @@ func MountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPoint str
}
for _, file := range mountFiles {
if _, err := os.Stat(file); err == nil {
- mounts, err := addSubscriptionsFromMountsFile(file, mountLabel, containerWorkingDir, uid, gid)
+ mounts, err := addSubscriptionsFromMountsFile(file, mountLabel, containerRunDir, uid, gid)
if err != nil {
logrus.Warnf("Failed to mount subscriptions, skipping entry in %s: %v", file, err)
}
@@ -191,7 +192,7 @@ func MountsWithUIDGID(mountLabel, containerWorkingDir, mountFile, mountPoint str
_, err := os.Stat("/etc/system-fips")
switch {
case err == nil:
- if err := addFIPSModeSubscription(&subscriptionMounts, containerWorkingDir, mountPoint, mountLabel, uid, gid); err != nil {
+ if err := addFIPSModeSubscription(&subscriptionMounts, containerRunDir, mountPoint, mountLabel, uid, gid); err != nil {
logrus.Errorf("Adding FIPS mode subscription to container: %v", err)
}
case os.IsNotExist(err):
@@ -210,7 +211,7 @@ func rchown(chowndir string, uid, gid int) error {
// addSubscriptionsFromMountsFile copies the contents of host directory to container directory
// and returns a list of mounts
-func addSubscriptionsFromMountsFile(filePath, mountLabel, containerWorkingDir string, uid, gid int) ([]rspec.Mount, error) {
+func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string, uid, gid int) ([]rspec.Mount, error) {
var mounts []rspec.Mount
defaultMountsPaths := getMounts(filePath)
for _, path := range defaultMountsPaths {
@@ -228,7 +229,7 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerWorkingDir st
return nil, err
}
- ctrDirOrFileOnHost := filepath.Join(containerWorkingDir, ctrDirOrFile)
+ ctrDirOrFileOnHost := filepath.Join(containerRunDir, ctrDirOrFile)
// In the event of a restart, don't want to copy subscriptions over again as they already would exist in ctrDirOrFileOnHost
_, err = os.Stat(ctrDirOrFileOnHost)
@@ -300,13 +301,17 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerWorkingDir st
return mounts, nil
}
-// addFIPSModeSubscription creates /run/secrets/system-fips in the container
-// root filesystem if /etc/system-fips exists on hosts.
-// This enables the container to be FIPS compliant and run openssl in
-// FIPS mode as the host is also in FIPS mode.
-func addFIPSModeSubscription(mounts *[]rspec.Mount, containerWorkingDir, mountPoint, mountLabel string, uid, gid int) error {
+// addFIPSModeSubscription adds mounts to the `mounts` slice that are needed for the container to run openssl in FIPS mode
+// (i.e. be FIPS compliant).
+// It should only be called if /etc/system-fips exists on the host.
+// It primarily does two things:
+// - creates /run/secrets/system-fips in the container root filesystem, and adds it to the `mounts` slice.
+// - If `/etc/crypto-policies/back-ends` already exists inside of the container, it creates
+// `/usr/share/crypto-policies/back-ends/FIPS` inside the container as well.
+// This is done from within the container to avoid policy incompatibility between the container and the host.
+func addFIPSModeSubscription(mounts *[]rspec.Mount, containerRunDir, mountPoint, mountLabel string, uid, gid int) error {
subscriptionsDir := "/run/secrets"
- ctrDirOnHost := filepath.Join(containerWorkingDir, subscriptionsDir)
+ ctrDirOnHost := filepath.Join(containerRunDir, subscriptionsDir)
if _, err := os.Stat(ctrDirOnHost); os.IsNotExist(err) {
if err = idtools.MkdirAllAs(ctrDirOnHost, 0755, uid, gid); err != nil { //nolint
return err
@@ -322,7 +327,7 @@ func addFIPSModeSubscription(mounts *[]rspec.Mount, containerWorkingDir, mountPo
if err != nil {
return errors.Wrap(err, "creating system-fips file in container for FIPS mode")
}
- defer file.Close()
+ file.Close()
}
if !mountExists(*mounts, subscriptionsDir) {
diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go
index b6ceabce5..38b0d443b 100644
--- a/vendor/github.com/containers/common/version/version.go
+++ b/vendor/github.com/containers/common/version/version.go
@@ -1,4 +1,4 @@
package version
// Version is the version of the build.
-const Version = "0.46.1-dev"
+const Version = "0.47.2"
diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go
index 383215182..512e643b9 100644
--- a/vendor/github.com/containers/image/v5/copy/copy.go
+++ b/vendor/github.com/containers/image/v5/copy/copy.go
@@ -124,6 +124,7 @@ type ImageListSelection int
type Options struct {
RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature.
SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
+ SignPassphrase string // Passphrase to use when signing with the key ID from `SignBy`.
ReportWriter io.Writer
SourceCtx *types.SystemContext
DestinationCtx *types.SystemContext
@@ -569,7 +570,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
// Sign the manifest list.
if options.SignBy != "" {
- newSig, err := c.createSignature(manifestList, options.SignBy)
+ newSig, err := c.createSignature(manifestList, options.SignBy, options.SignPassphrase)
if err != nil {
return nil, err
}
@@ -791,7 +792,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
}
if options.SignBy != "" {
- newSig, err := c.createSignature(manifestBytes, options.SignBy)
+ newSig, err := c.createSignature(manifestBytes, options.SignBy, options.SignPassphrase)
if err != nil {
return nil, "", "", err
}
diff --git a/vendor/github.com/containers/image/v5/copy/sign.go b/vendor/github.com/containers/image/v5/copy/sign.go
index 61612a4d3..21a3facd7 100644
--- a/vendor/github.com/containers/image/v5/copy/sign.go
+++ b/vendor/github.com/containers/image/v5/copy/sign.go
@@ -7,7 +7,7 @@ import (
)
// createSignature creates a new signature of manifest using keyIdentity.
-func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) {
+func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase string) ([]byte, error) {
mech, err := signature.NewGPGSigningMechanism()
if err != nil {
return nil, errors.Wrap(err, "initializing GPG")
@@ -23,7 +23,7 @@ func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, e
}
c.Printf("Signing manifest\n")
- newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity)
+ newSig, err := signature.SignDockerManifestWithOptions(manifest, dockerReference.String(), mech, keyIdentity, &signature.SignOptions{Passphrase: passphrase})
if err != nil {
return nil, errors.Wrap(err, "creating signature")
}
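A hedged sketch of how a caller might use the new SignPassphrase field together with SignBy when copying an image. The image names and key identity are placeholders, and the allow-all policy is only for illustration; real callers would load the system signature policy.

    package main

    import (
    	"context"
    	"log"
    	"os"

    	"github.com/containers/image/v5/copy"
    	"github.com/containers/image/v5/signature"
    	"github.com/containers/image/v5/transports/alltransports"
    )

    func main() {
    	srcRef, err := alltransports.ParseImageName("docker://quay.io/example/src:latest")
    	if err != nil {
    		log.Fatal(err)
    	}
    	destRef, err := alltransports.ParseImageName("docker://quay.io/example/dest:latest")
    	if err != nil {
    		log.Fatal(err)
    	}

    	// An allow-all policy just for the sketch.
    	policy := &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
    	policyCtx, err := signature.NewPolicyContext(policy)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer policyCtx.Destroy()

    	_, err = copy.Image(context.Background(), policyCtx, destRef, srcRef, &copy.Options{
    		ReportWriter:   os.Stdout,
    		SignBy:         "security@example.com",       // GPG key identity, as before
    		SignPassphrase: os.Getenv("SIGN_PASSPHRASE"), // new: unlocks the key without prompting
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    }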
diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go
index 511cdcc37..20955ab7f 100644
--- a/vendor/github.com/containers/image/v5/manifest/common.go
+++ b/vendor/github.com/containers/image/v5/manifest/common.go
@@ -51,7 +51,7 @@ const (
// other than the ones the caller specifically allows.
// expectedMIMEType is used only for diagnostics.
// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format
-// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambigous
+// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous
// data that just isn’t what was expected, as opposed to actually ambiguous data.
func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string,
allowed allowedManifestFields) error {
@@ -71,7 +71,7 @@ func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string,
Manifests interface{} `json:"manifests"`
}{}
if err := json.Unmarshal(manifest, &detectedFields); err != nil {
- // The caller was supposed to already validate version numbers, so this shold not happen;
+ // The caller was supposed to already validate version numbers, so this should not happen;
// let’s not bother with making this error “nice”.
return err
}
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
index 8fa530549..1d73dc405 100644
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
@@ -667,6 +667,7 @@ func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types
// This intentionally uses "registry", not "key"; we don't support namespaced
// credentials in helpers.
if ch, exists := auths.CredHelpers[registry]; exists {
+ logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path)
return getAuthFromCredHelper(ch, registry)
}
@@ -703,6 +704,9 @@ func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types
}
}
+ // Only log this if we found nothing; getCredentialsWithHomeDir logs the
+ // source of found data.
+ logrus.Debugf("No credentials matching %s found in %s", key, path)
return types.DockerAuthConfig{}, nil
}
diff --git a/vendor/github.com/containers/image/v5/sif/load.go b/vendor/github.com/containers/image/v5/sif/load.go
new file mode 100644
index 000000000..ba6d875ba
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/sif/load.go
@@ -0,0 +1,211 @@
+package sif
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "github.com/sirupsen/logrus"
+ "github.com/sylabs/sif/v2/pkg/sif"
+)
+
+// injectedScriptTargetPath is the path injectedScript should be written to in the created image.
+const injectedScriptTargetPath = "/podman/runscript"
+
+// parseDefFile parses a SIF definition file from reader,
+// and returns non-trivial contents of the %environment and %runscript sections.
+func parseDefFile(reader io.Reader) ([]string, []string, error) {
+ type parserState int
+ const (
+ parsingOther parserState = iota
+ parsingEnvironment
+ parsingRunscript
+ )
+
+ environment := []string{}
+ runscript := []string{}
+
+ state := parsingOther
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ s := strings.TrimSpace(scanner.Text())
+ switch {
+ case s == `%environment`:
+ state = parsingEnvironment
+ case s == `%runscript`:
+ state = parsingRunscript
+ case strings.HasPrefix(s, "%"):
+ state = parsingOther
+ case state == parsingEnvironment:
+ if s != "" && !strings.HasPrefix(s, "#") {
+ environment = append(environment, s)
+ }
+ case state == parsingRunscript:
+ runscript = append(runscript, s)
+ default: // parsingOther: ignore the line
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, nil, fmt.Errorf("reading lines from SIF definition file object: %w", err)
+ }
+ return environment, runscript, nil
+}
+
+// generateInjectedScript generates a shell script based on
+// SIF definition file %environment and %runscript data, and returns it.
+func generateInjectedScript(environment []string, runscript []string) []byte {
+ script := fmt.Sprintf("#!/bin/bash\n"+
+ "%s\n"+
+ "%s\n", strings.Join(environment, "\n"), strings.Join(runscript, "\n"))
+ return []byte(script)
+}
+
+// processDefFile finds sif.DataDeffile in sifImage, if any,
+// and returns:
+// - the command to run
+// - contents of a script to inject as injectedScriptTargetPath, or nil
+func processDefFile(sifImage *sif.FileImage) (string, []byte, error) {
+ var environment, runscript []string
+
+ desc, err := sifImage.GetDescriptor(sif.WithDataType(sif.DataDeffile))
+ if err == nil {
+ environment, runscript, err = parseDefFile(desc.GetReader())
+ if err != nil {
+ return "", nil, err
+ }
+ }
+
+ var command string
+ var injectedScript []byte
+ if len(environment) == 0 && len(runscript) == 0 {
+ command = "bash"
+ injectedScript = nil
+ } else {
+ injectedScript = generateInjectedScript(environment, runscript)
+ command = injectedScriptTargetPath
+ }
+
+ return command, injectedScript, nil
+}
+
+func writeInjectedScript(extractedRootPath string, injectedScript []byte) error {
+ if injectedScript == nil {
+ return nil
+ }
+ filePath := filepath.Join(extractedRootPath, injectedScriptTargetPath)
+ parentDirPath := filepath.Dir(filePath)
+ if err := os.MkdirAll(parentDirPath, 0755); err != nil {
+ return fmt.Errorf("creating %s: %w", parentDirPath, err)
+ }
+ if err := ioutil.WriteFile(filePath, injectedScript, 0755); err != nil {
+ return fmt.Errorf("writing %s to %s: %w", injectedScriptTargetPath, filePath, err)
+ }
+ return nil
+}
+
+// createTarFromSIFInputs creates a tar file at tarPath, using a squashfs image at squashFSPath.
+// It can also use extractedRootPath and scriptPath, which are allocated for its exclusive use,
+// if necessary.
+func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, injectedScript []byte, extractedRootPath, scriptPath string) error {
+ // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive
+ // for our use.
+ defer os.RemoveAll(extractedRootPath)
+
+ // Almost everything in extractedRootPath comes from squashFSPath.
+ conversionCommand := fmt.Sprintf("unsquashfs -d %s -f %s && tar --acls --xattrs -C %s -cpf %s ./",
+ extractedRootPath, squashFSPath, extractedRootPath, tarPath)
+ script := "#!/bin/sh\n" + conversionCommand + "\n"
+ if err := ioutil.WriteFile(scriptPath, []byte(script), 0755); err != nil {
+ return err
+ }
+ defer os.Remove(scriptPath)
+
+ // On top of squashFSPath, we only add injectedScript, if necessary.
+ if err := writeInjectedScript(extractedRootPath, injectedScript); err != nil {
+ return err
+ }
+
+ logrus.Debugf("Converting squashfs to tar, command: %s ...", conversionCommand)
+ cmd := exec.CommandContext(ctx, "fakeroot", "--", scriptPath)
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("converting image: %w, output: %s", err, string(output))
+ }
+ logrus.Debugf("... finished converting squashfs to tar")
+ return nil
+}
+
+// convertSIFToElements processes sifImage and creates/returns
+// the relevant elements for constructing an OCI-like image:
+// - A path to a tar file containing a root filesystem,
+// - A command to run.
+// The returned tar file path is inside tempDir, which can be assumed to be empty
+// at start, and is exclusively used by the current process (i.e. it is safe
+// to use hard-coded relative paths within it).
+func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir string) (string, []string, error) {
+ // We could allocate unique names for all of these using ioutil.Temp*, but tempDir is exclusive,
+ // so we can just hard-code a set of unique values here.
+ // We create and/or manage cleanup of these two paths.
+ squashFSPath := filepath.Join(tempDir, "rootfs.squashfs")
+ tarPath := filepath.Join(tempDir, "rootfs.tar")
+ // We only allocate these paths, the user is responsible for cleaning them up.
+ extractedRootPath := filepath.Join(tempDir, "rootfs")
+ scriptPath := filepath.Join(tempDir, "script")
+
+ succeeded := false
+ // It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive
+ // for our use.
+ // Ideally we would remove squashFSPath immediately after creating extractedRootPath, but we need
+ // to run both creation and consumption of extractedRootPath in the same fakeroot context.
+ // So, overall, this process requires at least 2 compressed copies (SIF and squashFSPath) and 2
+ // uncompressed copies (extractedRootPath and tarPath) of the data, all using up space at the same time.
+ // That's rather unsatisfactory, ideally we would be streaming the data directly from a squashfs parser
+ // reading from the SIF file to a tarball, for 1 compressed and 1 uncompressed copy.
+ defer os.Remove(squashFSPath)
+ defer func() {
+ if !succeeded {
+ os.Remove(tarPath)
+ }
+ }()
+
+ command, injectedScript, err := processDefFile(sifImage)
+ if err != nil {
+ return "", nil, err
+ }
+
+ rootFS, err := sifImage.GetDescriptor(sif.WithPartitionType(sif.PartPrimSys))
+ if err != nil {
+ return "", nil, fmt.Errorf("looking up rootfs from SIF file: %w", err)
+ }
+ // TODO: We'd prefer not to make a full copy of the file here; unsquashfs ≥ 4.4
+ // has an -o option that allows extracting a squashfs from the SIF file directly,
+ // but that version is not currently available in RHEL 8.
+ logrus.Debugf("Creating a temporary squashfs image %s ...", squashFSPath)
+ if err := func() error { // A scope for defer
+ f, err := os.Create(squashFSPath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+ if _, err := io.CopyN(f, rootFS.GetReader(), rootFS.Size()); err != nil {
+ return err
+ }
+ return nil
+ }(); err != nil {
+ return "", nil, err
+ }
+ logrus.Debugf("... finished creating a temporary squashfs image")
+
+ if err := createTarFromSIFInputs(ctx, tarPath, squashFSPath, injectedScript, extractedRootPath, scriptPath); err != nil {
+ return "", nil, err
+ }
+ succeeded = true
+ return tarPath, []string{command}, nil
+}
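For reference, a tiny standalone sketch of the script that generateInjectedScript assembles from %environment and %runscript lines; the sample lines are invented. The converter writes the result to /podman/runscript inside the extracted rootfs and uses that path as the image command.

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	// Mirrors generateInjectedScript above: %environment lines first, then %runscript lines.
    	environment := []string{`export LC_ALL=C`}
    	runscript := []string{`exec echo "hello from SIF"`}

    	script := fmt.Sprintf("#!/bin/bash\n"+
    		"%s\n"+
    		"%s\n", strings.Join(environment, "\n"), strings.Join(runscript, "\n"))
    	fmt.Print(script)
    }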
diff --git a/vendor/github.com/containers/image/v5/sif/src.go b/vendor/github.com/containers/image/v5/sif/src.go
new file mode 100644
index 000000000..ba95a469f
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/sif/src.go
@@ -0,0 +1,217 @@
+package sif
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+
+ "github.com/containers/image/v5/internal/tmpdir"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/go-digest"
+ imgspecs "github.com/opencontainers/image-spec/specs-go"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sirupsen/logrus"
+ "github.com/sylabs/sif/v2/pkg/sif"
+)
+
+type sifImageSource struct {
+ ref sifReference
+ workDir string
+ layerDigest digest.Digest
+ layerSize int64
+ layerFile string
+ config []byte
+ configDigest digest.Digest
+ manifest []byte
+}
+
+// getBlobInfo returns the digest and size of the provided file.
+func getBlobInfo(path string) (digest.Digest, int64, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return "", -1, fmt.Errorf("opening %q for reading: %w", path, err)
+ }
+ defer f.Close()
+
+ // TODO: Instead of writing the tar file to disk, and reading
+ // it here again, stream the tar file to a pipe and
+ // compute the digest while writing it to disk.
+ logrus.Debugf("Computing a digest of the SIF conversion output...")
+ digester := digest.Canonical.Digester()
+ // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
+ size, err := io.Copy(digester.Hash(), f)
+ if err != nil {
+ return "", -1, fmt.Errorf("reading %q: %w", path, err)
+ }
+ digest := digester.Digest()
+ logrus.Debugf("... finished computing the digest of the SIF conversion output")
+
+ return digest, size, nil
+}
+
+// newImageSource returns an ImageSource for reading from an existing SIF image.
+// newImageSource extracts SIF objects and saves them in a temp directory.
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifReference) (types.ImageSource, error) {
+ sifImg, err := sif.LoadContainerFromPath(ref.file, sif.OptLoadWithFlag(os.O_RDONLY))
+ if err != nil {
+ return nil, fmt.Errorf("loading SIF file: %w", err)
+ }
+ defer func() {
+ _ = sifImg.UnloadContainer()
+ }()
+
+ workDir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "sif")
+ if err != nil {
+ return nil, fmt.Errorf("creating temp directory: %w", err)
+ }
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ os.RemoveAll(workDir)
+ }
+ }()
+
+ layerPath, commandLine, err := convertSIFToElements(ctx, sifImg, workDir)
+ if err != nil {
+ return nil, fmt.Errorf("converting rootfs from SquashFS to Tarball: %w", err)
+ }
+
+ layerDigest, layerSize, err := getBlobInfo(layerPath)
+ if err != nil {
+ return nil, fmt.Errorf("gathering blob information: %w", err)
+ }
+
+ created := sifImg.ModifiedAt()
+ config := imgspecv1.Image{
+ Created: &created,
+ Architecture: sifImg.PrimaryArch(),
+ OS: "linux",
+ Config: imgspecv1.ImageConfig{
+ Cmd: commandLine,
+ },
+ RootFS: imgspecv1.RootFS{
+ Type: "layers",
+ DiffIDs: []digest.Digest{layerDigest},
+ },
+ History: []imgspecv1.History{
+ {
+ Created: &created,
+ CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Hex(), os.PathSeparator),
+ Comment: "imported from SIF, uuid: " + sifImg.ID(),
+ },
+ {
+ Created: &created,
+ CreatedBy: "/bin/sh -c #(nop) CMD [\"bash\"]",
+ EmptyLayer: true,
+ },
+ },
+ }
+ configBytes, err := json.Marshal(&config)
+ if err != nil {
+ return nil, fmt.Errorf("generating configuration blob for %q: %w", ref.resolvedFile, err)
+ }
+ configDigest := digest.Canonical.FromBytes(configBytes)
+
+ manifest := imgspecv1.Manifest{
+ Versioned: imgspecs.Versioned{SchemaVersion: 2},
+ MediaType: imgspecv1.MediaTypeImageManifest,
+ Config: imgspecv1.Descriptor{
+ Digest: configDigest,
+ Size: int64(len(configBytes)),
+ MediaType: imgspecv1.MediaTypeImageConfig,
+ },
+ Layers: []imgspecv1.Descriptor{{
+ Digest: layerDigest,
+ Size: layerSize,
+ MediaType: imgspecv1.MediaTypeImageLayer,
+ }},
+ }
+ manifestBytes, err := json.Marshal(&manifest)
+ if err != nil {
+ return nil, fmt.Errorf("generating manifest for %q: %w", ref.resolvedFile, err)
+ }
+
+ succeeded = true
+ return &sifImageSource{
+ ref: ref,
+ workDir: workDir,
+ layerDigest: layerDigest,
+ layerSize: layerSize,
+ layerFile: layerPath,
+ config: configBytes,
+ configDigest: configDigest,
+ manifest: manifestBytes,
+ }, nil
+}
+
+// Reference returns the reference used to set up this source.
+func (s *sifImageSource) Reference() types.ImageReference {
+ return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *sifImageSource) Close() error {
+ return os.RemoveAll(s.workDir)
+}
+
+// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
+func (s *sifImageSource) HasThreadSafeGetBlob() bool {
+ return true
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
+func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+ switch info.Digest {
+ case s.configDigest:
+ return ioutil.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil
+ case s.layerDigest:
+ reader, err := os.Open(s.layerFile)
+ if err != nil {
+ return nil, -1, fmt.Errorf("opening %q: %w", s.layerFile, err)
+ }
+ return reader, s.layerSize, nil
+ default:
+ return nil, -1, fmt.Errorf("no blob with digest %q found", info.Digest.String())
+ }
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
+// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
+func (s *sifImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+ if instanceDigest != nil {
+ return nil, "", errors.New("manifest lists are not supported by the sif transport")
+ }
+ return s.manifest, imgspecv1.MediaTypeImageManifest, nil
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+func (s *sifImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
+ if instanceDigest != nil {
+ return nil, errors.New("manifest lists are not supported by the sif transport")
+ }
+ return nil, nil
+}
+
+// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
+// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
+// to read the image's layers.
+// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
+// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
+// (e.g. if the source never returns manifest lists).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (s *sifImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
+ return nil, nil
+}
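
Taken together, newImageSource synthesizes a one-layer OCI image on the fly: one config blob, one manifest, and one layer file under workDir, which the rest of the library can then copy like any other source. A minimal sketch of driving it through the public API once the transport is registered; the file paths, the "dir:" destination, and the accept-anything policy are illustrative placeholders, not part of this change.

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/transports/alltransports"
)

func main() {
	ctx := context.Background()

	// "sif:" references are plain file paths; NewReference resolves them internally.
	srcRef, err := alltransports.ParseImageName("sif:./example.sif")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	destRef, err := alltransports.ParseImageName("dir:./example-unpacked")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Accept-anything policy, purely for illustration.
	policy := &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
	policyCtx, err := signature.NewPolicyContext(policy)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer policyCtx.Destroy()

	// copy.Image ends up calling GetManifest and GetBlob on the sifImageSource above.
	if _, err := copy.Image(ctx, policyCtx, destRef, srcRef, &copy.Options{}); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
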
diff --git a/vendor/github.com/containers/image/v5/sif/transport.go b/vendor/github.com/containers/image/v5/sif/transport.go
new file mode 100644
index 000000000..18d894bc3
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/sif/transport.go
@@ -0,0 +1,164 @@
+package sif
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/image/v5/directory/explicitfilepath"
+ "github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/image"
+ "github.com/containers/image/v5/transports"
+ "github.com/containers/image/v5/types"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for SIF images.
+var Transport = sifTransport{}
+
+type sifTransport struct{}
+
+func (t sifTransport) Name() string {
+ return "sif"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t sifTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return NewReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value that will never be matched; it can "only" cause user confusion.
+// The scope passed to this function will never be ""; that value is always allowed.
+func (t sifTransport) ValidatePolicyConfigurationScope(scope string) error {
+ if !strings.HasPrefix(scope, "/") {
+ return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope)
+ }
+ // Refuse also "/", otherwise "/" and "" would have the same semantics,
+ // and "" could be unexpectedly shadowed by the "/" entry.
+ if scope == "/" {
+ return errors.New(`Invalid scope "/": Use the generic default scope ""`)
+ }
+ cleaned := filepath.Clean(scope)
+ if cleaned != scope {
+ return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
+ }
+ return nil
+}
+
+// sifReference is an ImageReference for SIF images.
+type sifReference struct {
+ // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time!
+ // Either of the paths may point to a different, or no, inode over time. resolvedFile may contain symbolic links, and so on.
+
+ // Generally we follow the intent of the user, and use the "file" member for filesystem operations (e.g. the user can use a relative path to avoid
+ // being exposed to symlinks and renames in the parent directories to the working directory).
+ // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
+ file string // As specified by the user. May be relative, contain symlinks, etc.
+ resolvedFile string // Absolute file path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
+}
+
+// There is no sif.ParseReference because it is rather pointless.
+// Callers who need a transport-independent interface will go through
+// sifTransport.ParseReference; callers who intentionally deal with SIF files
+// can use sif.NewReference.
+
+// NewReference returns an image file reference for a specified path.
+func NewReference(file string) (types.ImageReference, error) {
+ // We do not expose an API supplying the resolvedFile; we could, but recomputing it
+ // is generally cheap enough that we prefer being confident about the properties of resolvedFile.
+ resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file)
+ if err != nil {
+ return nil, err
+ }
+ return sifReference{file: file, resolvedFile: resolved}, nil
+}
+
+func (ref sifReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
+// instead, see transports.ImageName().
+func (ref sifReference) StringWithinTransport() string {
+ return ref.file
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref sifReference) DockerReference() reference.Named {
+ return nil
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref sifReference) PolicyConfigurationIdentity() string {
+ return ref.resolvedFile
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref sifReference) PolicyConfigurationNamespaces() []string {
+ res := []string{}
+ path := ref.resolvedFile
+ for {
+ lastSlash := strings.LastIndex(path, "/")
+ if lastSlash == -1 || lastSlash == 0 {
+ break
+ }
+ path = path[:lastSlash]
+ res = append(res, path)
+ }
+ // Note that we do not include "/"; it is redundant with the default "" global default,
+ // and rejected by sifTransport.ValidatePolicyConfigurationScope above.
+ return res
+}
+
+// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned ImageCloser.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
+func (ref sifReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
+ src, err := newImageSource(ctx, sys, ref)
+ if err != nil {
+ return nil, err
+ }
+ return image.FromSource(ctx, sys, src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference.
+// The caller must call .Close() on the returned ImageSource.
+func (ref sifReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
+ return newImageSource(ctx, sys, ref)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref sifReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
+ return nil, errors.New(`"sif:" locations can only be read from, not written to`)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref sifReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
+ return errors.New("Deleting images not implemented for sif: images")
+}
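
For a reference that resolves to, say, /var/lib/images/app.sif (a placeholder path), PolicyConfigurationIdentity() is that resolved path and PolicyConfigurationNamespaces() yields its parent directories down to, but not including, "/". A small sketch that replays the same loop in isolation, with namespacesFor as a hypothetical stand-in for the method:

package main

import (
	"fmt"
	"strings"
)

// namespacesFor mirrors sifReference.PolicyConfigurationNamespaces for a
// hypothetical resolved path; it is illustration only, not part of the package API.
func namespacesFor(resolved string) []string {
	res := []string{}
	path := resolved
	for {
		lastSlash := strings.LastIndex(path, "/")
		if lastSlash == -1 || lastSlash == 0 {
			break
		}
		path = path[:lastSlash]
		res = append(res, path)
	}
	return res
}

func main() {
	// Prints [/var/lib/images /var/lib /var]: each prefix is a valid policy scope per
	// ValidatePolicyConfigurationScope, and the global default "" is implied at the end.
	fmt.Println(namespacesFor("/var/lib/images/app.sif"))
}
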
diff --git a/vendor/github.com/containers/image/v5/signature/docker.go b/vendor/github.com/containers/image/v5/signature/docker.go
index 07fdd42a9..8e9ce0dd2 100644
--- a/vendor/github.com/containers/image/v5/signature/docker.go
+++ b/vendor/github.com/containers/image/v5/signature/docker.go
@@ -3,22 +3,46 @@
package signature
import (
+ "errors"
"fmt"
+ "strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/opencontainers/go-digest"
)
+// SignOptions includes optional parameters for signing container images.
+type SignOptions struct {
+ // Passphrase to use when signing with the key identity.
+ Passphrase string
+}
+
// SignDockerManifest returns a signature for manifest as the specified dockerReference,
-// using mech and keyIdentity.
-func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
+// using mech and keyIdentity, and the specified options.
+func SignDockerManifestWithOptions(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string, options *SignOptions) ([]byte, error) {
manifestDigest, err := manifest.Digest(m)
if err != nil {
return nil, err
}
sig := newUntrustedSignature(manifestDigest, dockerReference)
- return sig.sign(mech, keyIdentity)
+
+ var passphrase string
+ if options != nil {
+ passphrase = options.Passphrase
+ // The gpgme implementation can’t use a passphrase that contains \n; reject it here for consistent behavior.
+ if strings.Contains(passphrase, "\n") {
+ return nil, errors.New("invalid passphrase: must not contain a line break")
+ }
+ }
+
+ return sig.sign(mech, keyIdentity, passphrase)
+}
+
+// SignDockerManifest returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity.
+func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
+ return SignDockerManifestWithOptions(m, dockerReference, mech, keyIdentity, nil)
}
// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference,
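
A minimal sketch of calling the new options-aware entry point; the manifest bytes, reference, key fingerprint, and environment variable are placeholders, and the mechanism comes from the exported signature.NewGPGSigningMechanism constructor rather than the internal helpers changed below.

package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/signature"
)

func main() {
	// Placeholders for values a real caller would already have.
	manifestBytes := []byte(`{"schemaVersion": 2}`)
	dockerReference := "docker.io/library/busybox:latest"
	keyFingerprint := "0123456789ABCDEF"

	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer mech.Close()

	// The passphrase from SignOptions is forwarded to SignWithPassphrase on mechanisms
	// that support it; a nil options pointer behaves like the old SignDockerManifest.
	sig, err := signature.SignDockerManifestWithOptions(manifestBytes, dockerReference, mech, keyFingerprint,
		&signature.SignOptions{Passphrase: os.Getenv("SIGNING_PASSPHRASE")})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("produced a %d-byte signature\n", len(sig))
}
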
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism.go b/vendor/github.com/containers/image/v5/signature/mechanism.go
index ee3442cdf..9a32a4364 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism.go
@@ -18,8 +18,6 @@ import (
// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
// Each mechanism should eventually be closed by calling Close().
-// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to
-// eliminate ambiguities, support CA signatures and perhaps other key properties)
type SigningMechanism interface {
// Close removes resources associated with the mechanism, if any.
Close() error
@@ -38,6 +36,15 @@ type SigningMechanism interface {
UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error)
}
+// signingMechanismWithPassphrase is an internal extension of SigningMechanism.
+type signingMechanismWithPassphrase interface {
+ SigningMechanism
+
+ // Sign creates a (non-detached) signature of input using keyIdentity and passphrase.
+ // Fails with a SigningNotSupportedError if the mechanism does not support signing.
+ SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error)
+}
+
// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that.
type SigningNotSupportedError string
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
index 6ae74d430..c166fb32d 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go
@@ -5,11 +5,12 @@ package signature
import (
"bytes"
+ "errors"
"fmt"
"io/ioutil"
"os"
- "github.com/mtrmac/gpgme"
+ "github.com/proglottis/gpgme"
)
// A GPG/OpenPGP signing mechanism, implemented using gpgme.
@@ -20,7 +21,7 @@ type gpgmeSigningMechanism struct {
// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
// The caller must call .Close() on the returned SigningMechanism.
-func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
+func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) {
ctx, err := newGPGMEContext(optionalDir)
if err != nil {
return nil, err
@@ -35,7 +36,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er
// recognizes _only_ public keys from the supplied blob, and returns the identities
// of these keys.
// The caller must call .Close() on the returned SigningMechanism.
-func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
+func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) {
dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-")
if err != nil {
return nil, nil, err
@@ -117,9 +118,9 @@ func (m *gpgmeSigningMechanism) SupportsSigning() error {
return nil
}
-// Sign creates a (non-detached) signature of input using keyIdentity.
+// Sign creates a (non-detached) signature of input using keyIdentity and passphrase.
// Fails with a SigningNotSupportedError if the mechanism does not support signing.
-func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+func (m *gpgmeSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) {
key, err := m.ctx.GetKey(keyIdentity, true)
if err != nil {
return nil, err
@@ -133,12 +134,38 @@ func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte,
if err != nil {
return nil, err
}
+
+ if passphrase != "" {
+ // Callback to write the passphrase to the specified file descriptor.
+ callback := func(uidHint string, prevWasBad bool, gpgmeFD *os.File) error {
+ if prevWasBad {
+ return errors.New("bad passphrase")
+ }
+ _, err := gpgmeFD.WriteString(passphrase + "\n")
+ return err
+ }
+ if err := m.ctx.SetCallback(callback); err != nil {
+ return nil, fmt.Errorf("setting gpgme passphrase callback: %w", err)
+ }
+
+ // Loopback mode will use the callback instead of prompting the user.
+ if err := m.ctx.SetPinEntryMode(gpgme.PinEntryLoopback); err != nil {
+ return nil, fmt.Errorf("setting gpgme pinentry mode: %w", err)
+ }
+ }
+
if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil {
return nil, err
}
return sigBuffer.Bytes(), nil
}
+// Sign creates a (non-detached) signature of input using keyIdentity.
+// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+ return m.SignWithPassphrase(input, keyIdentity, "")
+}
+
// Verify parses unverifiedSignature and returns the content and the signer's identity
func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
signedBuffer := bytes.Buffer{}
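
The loopback wiring above is the only gpgme-specific piece: when a passphrase is supplied, the context switches to loopback pinentry and a callback writes the secret to the descriptor gpgme hands it. A standalone sketch of that wiring, assuming the package-level gpgme.New constructor (not shown in this hunk) and an environment variable as a placeholder source for the secret:

package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/proglottis/gpgme"
)

func main() {
	ctx, err := gpgme.New()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	passphrase := os.Getenv("SIGNING_PASSPHRASE") // placeholder source for the secret

	// Feed the passphrase to the loopback pinentry instead of an interactive
	// prompt, and give up after one rejected attempt rather than looping.
	callback := func(uidHint string, prevWasBad bool, gpgmeFD *os.File) error {
		if prevWasBad {
			return errors.New("bad passphrase")
		}
		_, err := gpgmeFD.WriteString(passphrase + "\n")
		return err
	}
	if err := ctx.SetCallback(callback); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if err := ctx.SetPinEntryMode(gpgme.PinEntryLoopback); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("context ready: a subsequent ctx.Sign will use the loopback passphrase")
}
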
diff --git a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
index 2f5ebb171..7a31425f1 100644
--- a/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
+++ b/vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go
@@ -30,7 +30,7 @@ type openpgpSigningMechanism struct {
// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
// The caller must call .Close() on the returned SigningMechanism.
-func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
+func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) {
m := &openpgpSigningMechanism{
keyring: openpgp.EntityList{},
}
@@ -61,7 +61,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er
// recognizes _only_ public keys from the supplied blob, and returns the identities
// of these keys.
// The caller must call .Close() on the returned SigningMechanism.
-func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
+func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) {
m := &openpgpSigningMechanism{
keyring: openpgp.EntityList{},
}
@@ -110,10 +110,16 @@ func (m *openpgpSigningMechanism) SupportsSigning() error {
// Sign creates a (non-detached) signature of input using keyIdentity.
// Fails with a SigningNotSupportedError if the mechanism does not support signing.
-func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+func (m *openpgpSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) {
return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
}
+// Sign creates a (non-detached) signature of input using keyIdentity.
+// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+ return m.SignWithPassphrase(input, keyIdentity, "")
+}
+
// Verify parses unverifiedSignature and returns the content and the signer's identity
func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil)
diff --git a/vendor/github.com/containers/image/v5/signature/signature.go b/vendor/github.com/containers/image/v5/signature/signature.go
index 09f4f85e0..05bf8229e 100644
--- a/vendor/github.com/containers/image/v5/signature/signature.go
+++ b/vendor/github.com/containers/image/v5/signature/signature.go
@@ -190,12 +190,20 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
// of the system just because it is a private key — actually the presence of a private key
// on the system increases the likelihood of a successful attack on that private key
// on that particular system.)
-func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) {
+func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string, passphrase string) ([]byte, error) {
json, err := json.Marshal(s)
if err != nil {
return nil, err
}
+ if newMech, ok := mech.(signingMechanismWithPassphrase); ok {
+ return newMech.SignWithPassphrase(json, keyIdentity, passphrase)
+ }
+
+ if passphrase != "" {
+ return nil, errors.New("signing mechanism does not support passphrases")
+ }
+
return mech.Sign(json, keyIdentity)
}
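
untrustedSignature.sign now upgrades the mechanism through a type assertion: passphrase-aware mechanisms get SignWithPassphrase, everything else keeps the plain Sign path and rejects non-empty passphrases. A self-contained sketch of that optional-interface pattern, using hypothetical names (signer, passphraseSigner, noopSigner) purely to show the dispatch shape:

package main

import (
	"errors"
	"fmt"
)

// signer and passphraseSigner mirror the shape of SigningMechanism and
// signingMechanismWithPassphrase; the names are hypothetical.
type signer interface {
	Sign(input []byte, keyIdentity string) ([]byte, error)
}

type passphraseSigner interface {
	signer
	SignWithPassphrase(input []byte, keyIdentity, passphrase string) ([]byte, error)
}

// dispatch prefers the extended interface when the concrete value implements it,
// and otherwise rejects non-empty passphrases outright, like sign() above.
func dispatch(mech signer, payload []byte, keyIdentity, passphrase string) ([]byte, error) {
	if ext, ok := mech.(passphraseSigner); ok {
		return ext.SignWithPassphrase(payload, keyIdentity, passphrase)
	}
	if passphrase != "" {
		return nil, errors.New("signing mechanism does not support passphrases")
	}
	return mech.Sign(payload, keyIdentity)
}

// noopSigner implements only the base interface.
type noopSigner struct{}

func (noopSigner) Sign(input []byte, keyIdentity string) ([]byte, error) {
	return []byte("signed:" + keyIdentity), nil
}

func main() {
	out, err := dispatch(noopSigner{}, []byte("payload"), "KEYID", "")
	fmt.Println(string(out), err) // signed:KEYID <nil>

	// A non-empty passphrase against a mechanism without the extension fails.
	_, err = dispatch(noopSigner{}, []byte("payload"), "KEYID", "secret")
	fmt.Println(err)
}
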
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go
index 2110a091d..0bae8b259 100644
--- a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go
@@ -12,7 +12,9 @@ import (
_ "github.com/containers/image/v5/oci/archive"
_ "github.com/containers/image/v5/oci/layout"
_ "github.com/containers/image/v5/openshift"
+ _ "github.com/containers/image/v5/sif"
_ "github.com/containers/image/v5/tarball"
+
// The ostree transport is registered by ostree*.go
// The storage transport is registered by storage*.go
"github.com/containers/image/v5/transports"
diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go
index a743ca7a9..75c53959f 100644
--- a/vendor/github.com/containers/image/v5/version/version.go
+++ b/vendor/github.com/containers/image/v5/version/version.go
@@ -6,7 +6,7 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
- VersionMinor = 18
+ VersionMinor = 19
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
diff --git a/vendor/github.com/containers/psgo/go.mod b/vendor/github.com/containers/psgo/go.mod
index fd19d9b48..1489be7c3 100644
--- a/vendor/github.com/containers/psgo/go.mod
+++ b/vendor/github.com/containers/psgo/go.mod
@@ -3,7 +3,8 @@ module github.com/containers/psgo
go 1.14
require (
- github.com/opencontainers/runc v1.0.2
+ github.com/containers/storage v1.38.0
+ github.com/opencontainers/runc v1.1.0
github.com/stretchr/testify v1.7.0
- golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2
+ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
)
diff --git a/vendor/github.com/containers/psgo/go.sum b/vendor/github.com/containers/psgo/go.sum
index 85b0f4ff7..71bfe7abe 100644
--- a/vendor/github.com/containers/psgo/go.sum
+++ b/vendor/github.com/containers/psgo/go.sum
@@ -1,77 +1,1041 @@
+bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
+github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
+github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
+github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
+github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
+github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
+github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
+github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
+github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
+github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
+github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
+github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
+github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
+github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
+github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
+github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
+github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
+github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
+github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
+github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
+github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
+github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
+github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
+github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
+github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
+github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
+github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
+github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
+github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
+github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
+github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
+github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
+github.com/containerd/stargz-snapshotter/estargz v0.10.1/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
+github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
+github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
+github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
+github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
+github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
+github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
+github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
+github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
+github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
+github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containers/storage v1.38.0 h1:QTgqmtQeb2tk1VucK0nZwCJKmlVLZGybrMMMlixedFY=
+github.com/containers/storage v1.38.0/go.mod h1:lBzt28gAk5ADZuRtwdndRJyqX22vnRaXmlF+7ktfMYc=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
+github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
+github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
+github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
+github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
+github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
+github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
+github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI=
+github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
-github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
+github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.1.0 h1:O9+X96OcDjkmmZyfaG996kV7yq8HsoU2h1XRRQcefG8=
+github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
+github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
+github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
+github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
+github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
+github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
+github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2 h1:c8PlLMqBbOHoqtjteWm5/kbe6rNY2pbRfbIMVnepueo=
-golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
+gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
+k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
+k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
+k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
+k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
+k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
+k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
+k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
+k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
+k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
+k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0=
+k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
+k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
+k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
+k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
+k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
+k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
+k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
+k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
+k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/vendor/github.com/containers/psgo/internal/proc/ns.go b/vendor/github.com/containers/psgo/internal/proc/ns.go
index 28ee6a2c9..9e77b865b 100644
--- a/vendor/github.com/containers/psgo/internal/proc/ns.go
+++ b/vendor/github.com/containers/psgo/internal/proc/ns.go
@@ -19,13 +19,9 @@ import (
"fmt"
"io"
"os"
-)
-type IDMap struct {
- ContainerID int
- HostID int
- Size int
-}
+ "github.com/containers/storage/pkg/idtools"
+)
// ParsePIDNamespace returns the content of /proc/$pid/ns/pid.
func ParsePIDNamespace(pid string) (string, error) {
@@ -46,14 +42,14 @@ func ParseUserNamespace(pid string) (string, error) {
}
// ReadMappings reads the user namespace mappings at the specified path
-func ReadMappings(path string) ([]IDMap, error) {
+func ReadMappings(path string) ([]idtools.IDMap, error) {
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
- mappings := []IDMap{}
+ var mappings []idtools.IDMap
buf := bufio.NewReader(file)
for {
@@ -68,10 +64,10 @@ func ReadMappings(path string) ([]IDMap, error) {
return mappings, nil
}
- containerID, hostID, size := 0, 0, 0
+ var containerID, hostID, size int
if _, err := fmt.Sscanf(string(line), "%d %d %d", &containerID, &hostID, &size); err != nil {
return nil, fmt.Errorf("cannot parse %s: %w", string(line), err)
}
- mappings = append(mappings, IDMap{ContainerID: containerID, HostID: hostID, Size: size})
+ mappings = append(mappings, idtools.IDMap{ContainerID: containerID, HostID: hostID, Size: size})
}
}
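The ns.go change above replaces psgo's private IDMap type with idtools.IDMap but keeps the same /proc/$pid/uid_map parsing. The sketch below mirrors that parsing with a local stand-in struct so it runs without the containers/storage module; it illustrates the mapping-file format rather than reproducing the vendored code.

    package main

    import (
    	"bufio"
    	"fmt"
    	"strings"
    )

    // idMap is a local stand-in for idtools.IDMap (ContainerID, HostID, Size).
    type idMap struct {
    	ContainerID int
    	HostID      int
    	Size        int
    }

    // parseMappings parses uid_map/gid_map content: one "containerID hostID size"
    // triple per line, the same layout ReadMappings scans with fmt.Sscanf.
    func parseMappings(content string) ([]idMap, error) {
    	var mappings []idMap
    	scanner := bufio.NewScanner(strings.NewReader(content))
    	for scanner.Scan() {
    		var containerID, hostID, size int
    		if _, err := fmt.Sscanf(scanner.Text(), "%d %d %d", &containerID, &hostID, &size); err != nil {
    			return nil, fmt.Errorf("cannot parse %q: %w", scanner.Text(), err)
    		}
    		mappings = append(mappings, idMap{containerID, hostID, size})
    	}
    	return mappings, scanner.Err()
    }

    func main() {
    	m, err := parseMappings("         0     100000      65536\n")
    	fmt.Println(m, err) // [{0 100000 65536}] <nil>
    }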
diff --git a/vendor/github.com/containers/psgo/internal/proc/status.go b/vendor/github.com/containers/psgo/internal/proc/status.go
index 1896b5c07..1d2247cbd 100644
--- a/vendor/github.com/containers/psgo/internal/proc/status.go
+++ b/vendor/github.com/containers/psgo/internal/proc/status.go
@@ -18,8 +18,11 @@ import (
"bufio"
"fmt"
"os"
- "os/exec"
+ "strconv"
"strings"
+ "sync"
+
+ "github.com/containers/storage/pkg/idtools"
)
// Status is a direct translation of a `/proc/[pid]/status`, which provides much
@@ -173,23 +176,8 @@ type Status struct {
NonvoluntaryCtxtSwitches string
}
-// readStatusUserNS joins the user namespace of pid and returns the content of
-// /proc/pid/status as a string slice.
-func readStatusUserNS(pid string) ([]string, error) {
- path := fmt.Sprintf("/proc/%s/status", pid)
- args := []string{"nsenter", "-U", "-t", pid, "cat", path}
-
- c := exec.Command(args[0], args[1:]...)
- output, err := c.CombinedOutput()
- if err != nil {
- return nil, fmt.Errorf("error executing %q: %w", strings.Join(args, " "), err)
- }
-
- return strings.Split(string(output), "\n"), nil
-}
-
-// readStatusDefault returns the content of /proc/pid/status as a string slice.
-func readStatusDefault(pid string) ([]string, error) {
+// readStatus returns the content of /proc/pid/status as a string slice.
+func readStatus(pid string) ([]string, error) {
path := fmt.Sprintf("/proc/%s/status", pid)
f, err := os.Open(path)
if err != nil {
@@ -203,21 +191,81 @@ func readStatusDefault(pid string) ([]string, error) {
return lines, nil
}
-// ParseStatus parses the /proc/$pid/status file and returns a *Status.
-func ParseStatus(pid string, joinUserNS bool) (*Status, error) {
- var lines []string
- var err error
+// mapField maps a single string-typed ID field given the set of mappings. If
+// no mapping exists, the overflow uid/gid is used.
+func mapStatusField(field *string, mapping []idtools.IDMap, overflow string) {
+ hostId, err := strconv.Atoi(*field)
+ if err != nil {
+ *field = overflow
+ return
+ }
+ contId, err := idtools.RawToContainer(hostId, mapping)
+ if err != nil {
+ *field = overflow
+ return
+ }
+ *field = strconv.Itoa(contId)
+}
- if joinUserNS {
- lines, err = readStatusUserNS(pid)
- } else {
- lines, err = readStatusDefault(pid)
+var (
+ overflowOnce sync.Once
+ overflowUid = "65534"
+ overflowGid = "65534"
+)
+
+func overflowIds() (string, string) {
+ overflowOnce.Do(func() {
+ if uid, err := os.ReadFile("/proc/sys/kernel/overflowuid"); err == nil {
+ overflowUid = strings.TrimSpace(string(uid))
+ }
+ if gid, err := os.ReadFile("/proc/sys/kernel/overflowgid"); err == nil {
+ overflowGid = strings.TrimSpace(string(gid))
+ }
+ })
+ return overflowUid, overflowGid
+}
+
+// mapStatus takes a Status struct and remaps all of the relevant fields to
+// match the user namespace of the target process.
+func mapStatus(pid string, status *Status) (*Status, error) {
+ uidMap, err := ReadMappings(fmt.Sprintf("/proc/%s/uid_map", pid))
+ if err != nil {
+ return nil, err
+ }
+ gidMap, err := ReadMappings(fmt.Sprintf("/proc/%s/gid_map", pid))
+ if err != nil {
+ return nil, err
+ }
+ overflowUid, overflowGid := overflowIds()
+ for i := range status.Uids {
+ mapStatusField(&status.Uids[i], uidMap, overflowUid)
+ }
+ for i := range status.Gids {
+ mapStatusField(&status.Gids[i], gidMap, overflowGid)
}
+ for i := range status.Groups {
+ mapStatusField(&status.Groups[i], gidMap, overflowGid)
+ }
+ return status, nil
+}
+// ParseStatus parses the /proc/$pid/status file and returns a *Status.
+func ParseStatus(pid string, mapUserNS bool) (*Status, error) {
+ lines, err := readStatus(pid)
+ if err != nil {
+ return nil, err
+ }
+ status, err := parseStatus(pid, lines)
if err != nil {
return nil, err
}
- return parseStatus(pid, lines)
+ if mapUserNS {
+ status, err = mapStatus(pid, status)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return status, nil
}
// parseStatus extracts data from lines and returns a *Status.
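With the nsenter-based readStatusUserNS removed, ParseStatus now remaps the Uids, Gids and Groups fields in-process: mapStatusField converts each host ID to a container ID (via idtools.RawToContainer in the vendored code) and falls back to the kernel overflow ID when no range matches or the field is not numeric. A minimal sketch of that reverse lookup, assuming RawToContainer performs the usual range arithmetic, with a local stand-in for idtools.IDMap:

    package main

    import "fmt"

    // idMap is a local stand-in for idtools.IDMap.
    type idMap struct {
    	ContainerID int
    	HostID      int
    	Size        int
    }

    // rawToContainer maps a host ID back into the container's ID space; IDs that
    // fall outside every mapped range get the overflow ID (65534 unless the
    // kernel's overflowuid/overflowgid files say otherwise).
    func rawToContainer(hostID int, mapping []idMap, overflow string) string {
    	for _, m := range mapping {
    		if hostID >= m.HostID && hostID < m.HostID+m.Size {
    			return fmt.Sprint(m.ContainerID + (hostID - m.HostID))
    		}
    	}
    	return overflow
    }

    func main() {
    	mapping := []idMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
    	fmt.Println(rawToContainer(100001, mapping, "65534")) // "1": inside the mapped range
    	fmt.Println(rawToContainer(5, mapping, "65534"))      // "65534": unmapped host ID
    }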
diff --git a/vendor/github.com/containers/psgo/psgo.go b/vendor/github.com/containers/psgo/psgo.go
index ea893e7ca..d6cfcef4d 100644
--- a/vendor/github.com/containers/psgo/psgo.go
+++ b/vendor/github.com/containers/psgo/psgo.go
@@ -41,28 +41,18 @@ import (
"github.com/containers/psgo/internal/dev"
"github.com/containers/psgo/internal/proc"
"github.com/containers/psgo/internal/process"
+ "github.com/containers/storage/pkg/idtools"
"golang.org/x/sys/unix"
)
-// IDMap specifies a mapping range from the host to the container IDs.
-type IDMap struct {
- // ContainerID is the first ID in the container.
- ContainerID int
- // HostID is the first ID in the host.
- HostID int
- // Size specifies how long is the range. e.g. 1 means a single user
- // is mapped.
- Size int
-}
-
// JoinNamespaceOpts specifies different options for joining the specified namespaces.
type JoinNamespaceOpts struct {
// UIDMap specifies a mapping for UIDs in the container. If specified
// huser will perform the reverse mapping.
- UIDMap []IDMap
+ UIDMap []idtools.IDMap
// GIDMap specifies a mapping for GIDs in the container. If specified
// hgroup will perform the reverse mapping.
- GIDMap []IDMap
+ GIDMap []idtools.IDMap
	// FillMappings specifies whether UIDMap and GIDMap must be initialized
// with the current user namespace.
@@ -102,7 +92,7 @@ type aixFormatDescriptor struct {
}
// findID converts the specified id to the host mapping
-func findID(idStr string, mapping []IDMap, lookupFunc func(uid string) (string, error), overflowFile string) (string, error) {
+func findID(idStr string, mapping []idtools.IDMap, lookupFunc func(uid string) (string, error), overflowFile string) (string, error) {
if len(mapping) == 0 {
return idStr, nil
}
@@ -350,29 +340,16 @@ func JoinNamespaceAndProcessInfo(pid string, descriptors []string) ([][]string,
return JoinNamespaceAndProcessInfoWithOptions(pid, descriptors, &JoinNamespaceOpts{})
}
-func readMappings(path string) ([]IDMap, error) {
- mappings, err := proc.ReadMappings(path)
- if err != nil {
- return nil, err
- }
- var res []IDMap
- for _, i := range mappings {
- m := IDMap{ContainerID: i.ContainerID, HostID: i.HostID, Size: i.Size}
- res = append(res, m)
- }
- return res, nil
-}
-
func contextFromOptions(options *JoinNamespaceOpts) (*psContext, error) {
ctx := new(psContext)
ctx.opts = options
if ctx.opts != nil && ctx.opts.FillMappings {
- uidMappings, err := readMappings("/proc/self/uid_map")
+ uidMappings, err := proc.ReadMappings("/proc/self/uid_map")
if err != nil {
return nil, err
}
- gidMappings, err := readMappings("/proc/self/gid_map")
+ gidMappings, err := proc.ReadMappings("/proc/self/gid_map")
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile
index dbc1f7c99..d7ca0c1c4 100644
--- a/vendor/github.com/containers/storage/Makefile
+++ b/vendor/github.com/containers/storage/Makefile
@@ -51,6 +51,9 @@ sources := $(wildcard *.go cmd/containers-storage/*.go drivers/*.go drivers/*/*.
containers-storage: $(sources) ## build using gc on the host
$(GO) build $(MOD_VENDOR) -compiler gc $(BUILDFLAGS) ./cmd/containers-storage
+codespell:
+ codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L flate,uint,iff,od,ERRO -w
+
binary local-binary: containers-storage
local-gccgo: ## build using gccgo on the host
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION
index 97dd8ea50..c85090d66 100644
--- a/vendor/github.com/containers/storage/VERSION
+++ b/vendor/github.com/containers/storage/VERSION
@@ -1 +1 @@
-1.37.0+dev
+1.38.2
diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go
index a534630df..b7e681ace 100644
--- a/vendor/github.com/containers/storage/drivers/fsdiff.go
+++ b/vendor/github.com/containers/storage/drivers/fsdiff.go
@@ -138,6 +138,7 @@ func (gdw *NaiveDiffDriver) Changes(id string, idMappings *idtools.IDMappings, p
if parent != "" {
options := MountOpts{
MountLabel: mountLabel,
+ Options: []string{"ro"},
}
parentFs, err = driver.Get(parent, options)
if err != nil {
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 15ba57467..b22f9dfb2 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -1055,17 +1055,22 @@ func (d *Driver) getLower(parent string) (string, error) {
}
func (d *Driver) dir(id string) string {
+ p, _ := d.dir2(id)
+ return p
+}
+
+func (d *Driver) dir2(id string) (string, bool) {
newpath := path.Join(d.home, id)
if _, err := os.Stat(newpath); err != nil {
for _, p := range d.AdditionalImageStores() {
l := path.Join(p, d.name, id)
_, err = os.Stat(l)
if err == nil {
- return l
+ return l, true
}
}
}
- return newpath
+ return newpath, false
}
func (d *Driver) getLowerDirs(id string) ([]string, error) {
@@ -1260,11 +1265,11 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr
func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) {
d.locker.Lock(id)
defer d.locker.Unlock(id)
- dir := d.dir(id)
+ dir, inAdditionalStore := d.dir2(id)
if _, err := os.Stat(dir); err != nil {
return "", err
}
- readWrite := true
+ readWrite := !inAdditionalStore
if !d.SupportsShifting() || options.DisableShifting {
disableShifting = true
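The overlay driver change makes dir2 report whether a layer was found in an additional (read-only) image store, and get() then stops treating such layers as writable. The hunk ends before the mount options are assembled, so the snippet below is only a sketch under the assumption that a read-only overlay mount is built from lowerdirs alone while a writable one also gets upperdir and workdir; the helper name and paths are illustrative, not the driver's actual ones.

    package main

    import (
    	"fmt"
    	"strings"
    )

    // mountOptions sketches how a readWrite flag could gate the overlay mount
    // string; the real driver's option building (escaping, mount program, etc.)
    // is not shown in this hunk, so treat this as an assumption.
    func mountOptions(lowerDirs []string, upperDir, workDir string, readWrite bool) string {
    	opts := "lowerdir=" + strings.Join(lowerDirs, ":")
    	if readWrite {
    		opts += ",upperdir=" + upperDir + ",workdir=" + workDir
    	}
    	return opts
    }

    func main() {
    	lowers := []string{"/store/overlay/l/AAA", "/store/overlay/l/BBB"}
    	fmt.Println(mountOptions(lowers, "/store/overlay/id/diff", "/store/overlay/id/work", true))
    	fmt.Println(mountOptions(lowers, "/store/overlay/id/diff", "/store/overlay/id/work", false))
    }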
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod
index 96ca1f0b2..a2aff4902 100644
--- a/vendor/github.com/containers/storage/go.mod
+++ b/vendor/github.com/containers/storage/go.mod
@@ -3,22 +3,22 @@ go 1.14
module github.com/containers/storage
require (
- github.com/BurntSushi/toml v0.4.1
+ github.com/BurntSushi/toml v1.0.0
github.com/Microsoft/go-winio v0.5.1
- github.com/Microsoft/hcsshim v0.9.1
- github.com/containerd/stargz-snapshotter/estargz v0.10.1
+ github.com/Microsoft/hcsshim v0.9.2
+ github.com/containerd/stargz-snapshotter/estargz v0.11.0
github.com/cyphar/filepath-securejoin v0.2.3
github.com/docker/go-units v0.4.0
github.com/google/go-intervals v0.0.2
github.com/hashicorp/go-multierror v1.1.1
github.com/json-iterator/go v1.1.12
- github.com/klauspost/compress v1.13.6
+ github.com/klauspost/compress v1.14.2
github.com/klauspost/pgzip v1.2.5
github.com/mattn/go-shellwords v1.0.12
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
github.com/moby/sys/mountinfo v0.5.0
github.com/opencontainers/go-digest v1.0.0
- github.com/opencontainers/runc v1.0.3
+ github.com/opencontainers/runc v1.1.0
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/selinux v1.10.0
github.com/pkg/errors v0.9.1
@@ -29,6 +29,6 @@ require (
github.com/ulikunitz/xz v0.5.10
github.com/vbatts/tar-split v0.11.2
golang.org/x/net v0.0.0-20210825183410-e898025ed96a
- golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359
+ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
gotest.tools v2.2.0+incompatible
)
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum
index c7262fe7a..b211efd37 100644
--- a/vendor/github.com/containers/storage/go.sum
+++ b/vendor/github.com/containers/storage/go.sum
@@ -36,8 +36,8 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
-github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU=
+github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
@@ -57,8 +57,8 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
-github.com/Microsoft/hcsshim v0.9.1 h1:VfDCj+QnY19ktX5TsH22JHcjaZ05RWQiwDbOyEg5ziM=
-github.com/Microsoft/hcsshim v0.9.1/go.mod h1:Y/0uV2jUab5kBI7SQgl62at0AVX7uaruzADAVmxm3eM=
+github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY=
+github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -98,6 +98,7 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -106,6 +107,7 @@ github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLI
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -131,6 +133,7 @@ github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -173,8 +176,8 @@ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFY
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
-github.com/containerd/stargz-snapshotter/estargz v0.10.1 h1:hd1EoVjI2Ax8Cr64tdYqnJ4i4pZU49FkEf5kU8KxQng=
-github.com/containerd/stargz-snapshotter/estargz v0.10.1/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
+github.com/containerd/stargz-snapshotter/estargz v0.11.0 h1:t0IW5kOmY7AXDAWRUs2uVzDhijAUOAYVr/dyRhOQvBg=
+github.com/containerd/stargz-snapshotter/estargz v0.11.0/go.mod h1:/KsZXsJRllMbTKFfG0miFQWViQKdI9+9aSXs+HN0+ac=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -296,6 +299,7 @@ github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblf
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -420,8 +424,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
-github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw=
+github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -515,8 +519,8 @@ github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
-github.com/opencontainers/runc v1.0.3 h1:1hbqejyQWCJBvtKAfdO0b1FmaEf2z/bxnjqbARass5k=
-github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.1.0 h1:O9+X96OcDjkmmZyfaG996kV7yq8HsoU2h1XRRQcefG8=
+github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -578,6 +582,7 @@ github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiB
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
@@ -839,8 +844,11 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359 h1:2B5p2L5IfGiD7+b9BOoRMC6DgObAVZV+Fsp050NqXik=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index e7c4cfcf1..677a15edd 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -511,6 +511,10 @@ func (ta *tarAppender) addTarFile(path, name string) error {
return err
}
}
+ if fi.Mode()&os.ModeSocket != 0 {
+ logrus.Warnf("archive: skipping %q since it is a socket", path)
+ return nil
+ }
hdr, err := FileInfoHeader(name, fi, link)
if err != nil {
@@ -969,7 +973,10 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
whiteoutConverter := GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
buffer := make([]byte, 1<<20)
+ doChown := !options.NoLchown
if options.ForceMask != nil {
+ // if ForceMask is in place, make sure lchown is disabled.
+ doChown = false
uid, gid, mode, err := GetFileOwner(dest)
if err == nil {
value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
@@ -1074,7 +1081,7 @@ loop:
chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
}
- if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
+ if err = createTarFile(path, dest, hdr, trBuf, doChown, chownOpts, options.InUserNS, options.IgnoreChownErrors, options.ForceMask, buffer); err != nil {
return err
}
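Two behaviours change in archive.go: addTarFile now skips sockets (archive/tar cannot represent them), and Unpack disables lchown whenever ForceMask is set, because the destination's real ownership and mode are recorded as a "uid:gid:0mode" string instead (the xattr key that value lands in lies outside this hunk). A small sketch of that value encoding and how it can be read back; the helper names are illustrative:

    package main

    import "fmt"

    // formatOverride builds the "%d:%d:0%o" value Unpack produces from the
    // destination's owner and mode when ForceMask is set; parseOverride reverses
    // it. Only the value encoding is sketched; where it is stored is assumed.
    func formatOverride(uid, gid int, mode uint32) string {
    	return fmt.Sprintf("%d:%d:0%o", uid, gid, mode)
    }

    func parseOverride(value string) (uid, gid int, mode uint32, err error) {
    	_, err = fmt.Sscanf(value, "%d:%d:%o", &uid, &gid, &mode)
    	return
    }

    func main() {
    	v := formatOverride(1000, 1000, 0o700)
    	fmt.Println(v) // 1000:1000:0700
    	uid, gid, mode, _ := parseOverride(v)
    	fmt.Printf("%d %d %o\n", uid, gid, mode) // 1000 1000 700
    }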
diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
new file mode 100644
index 000000000..a931fb5d1
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
@@ -0,0 +1,630 @@
+package chunked
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+ "unsafe"
+
+ storage "github.com/containers/storage"
+ "github.com/containers/storage/pkg/chunked/internal"
+ "github.com/containers/storage/pkg/ioutils"
+ jsoniter "github.com/json-iterator/go"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ cacheKey = "chunked-manifest-cache"
+ cacheVersion = 1
+)
+
+type metadata struct {
+ tagLen int
+ digestLen int
+ tags []byte
+ vdata []byte
+}
+
+type layer struct {
+ id string
+ metadata *metadata
+ target string
+}
+
+type layersCache struct {
+ layers []layer
+ refs int
+ store storage.Store
+ mutex sync.RWMutex
+ created time.Time
+}
+
+var cacheMutex sync.Mutex
+var cache *layersCache
+
+func (c *layersCache) release() {
+ cacheMutex.Lock()
+ defer cacheMutex.Unlock()
+
+ c.refs--
+ if c.refs == 0 {
+ cache = nil
+ }
+}
+
+func getLayersCacheRef(store storage.Store) *layersCache {
+ cacheMutex.Lock()
+ defer cacheMutex.Unlock()
+ if cache != nil && cache.store == store && time.Since(cache.created).Minutes() < 10 {
+ cache.refs++
+ return cache
+ }
+ cache = &layersCache{
+ store: store,
+ refs: 1,
+ created: time.Now(),
+ }
+ return cache
+}
+
+func getLayersCache(store storage.Store) (*layersCache, error) {
+ c := getLayersCacheRef(store)
+
+ if err := c.load(); err != nil {
+ c.release()
+ return nil, err
+ }
+ return c, nil
+}
+
+func (c *layersCache) load() error {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ allLayers, err := c.store.Layers()
+ if err != nil {
+ return err
+ }
+ existingLayers := make(map[string]string)
+ for _, r := range c.layers {
+ existingLayers[r.id] = r.target
+ }
+
+ currentLayers := make(map[string]string)
+ for _, r := range allLayers {
+ currentLayers[r.ID] = r.ID
+ if _, found := existingLayers[r.ID]; found {
+ continue
+ }
+
+ bigData, err := c.store.LayerBigData(r.ID, cacheKey)
+ if err != nil {
+ if errors.Cause(err) == os.ErrNotExist {
+ continue
+ }
+ return err
+ }
+ defer bigData.Close()
+
+ metadata, err := readMetadataFromCache(bigData)
+ if err != nil {
+ logrus.Warningf("Error reading cache file for layer %q: %v", r.ID, err)
+ }
+
+ if metadata != nil {
+ c.addLayer(r.ID, metadata)
+ continue
+ }
+
+ manifestReader, err := c.store.LayerBigData(r.ID, bigDataKey)
+ if err != nil {
+ continue
+ }
+ defer manifestReader.Close()
+ manifest, err := ioutil.ReadAll(manifestReader)
+ if err != nil {
+ return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err)
+ }
+
+ metadata, err = writeCache(manifest, r.ID, c.store)
+ if err == nil {
+ c.addLayer(r.ID, metadata)
+ }
+ }
+
+ var newLayers []layer
+ for _, l := range c.layers {
+ if _, found := currentLayers[l.id]; found {
+ newLayers = append(newLayers, l)
+ }
+ }
+ c.layers = newLayers
+
+ return nil
+}
+
+// calculateHardLinkFingerprint calculates a hash that can be used to verify if a file
+// is usable for deduplication with hardlinks.
+// To calculate the digest, it uses the file payload digest, UID, GID, mode and xattrs.
+func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) {
+ digester := digest.Canonical.Digester()
+
+ modeString := fmt.Sprintf("%d:%d:%o", f.UID, f.GID, f.Mode)
+ hash := digester.Hash()
+
+ if _, err := hash.Write([]byte(f.Digest)); err != nil {
+ return "", err
+ }
+
+ if _, err := hash.Write([]byte(modeString)); err != nil {
+ return "", err
+ }
+
+ if len(f.Xattrs) > 0 {
+ keys := make([]string, 0, len(f.Xattrs))
+ for k := range f.Xattrs {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ if _, err := hash.Write([]byte(k)); err != nil {
+ return "", err
+ }
+ if _, err := hash.Write([]byte(f.Xattrs[k])); err != nil {
+ return "", err
+ }
+ }
+ }
+ return string(digester.Digest()), nil
+}
+
+// generateFileLocation generates a file location in the form $OFFSET@$PATH
+func generateFileLocation(path string, offset uint64) []byte {
+ return []byte(fmt.Sprintf("%d@%s", offset, path))
+}
+
+// generateTag generates a tag in the form $DIGEST$OFFSET@$LEN.
+// the [OFFSET; LEN] points to the variable length data where the file locations
+// are stored. $DIGEST has length digestLen stored in the metadata file header.
+func generateTag(digest string, offset, len uint64) string {
+ return fmt.Sprintf("%s%.20d@%.20d", digest, offset, len)
+}
+
+type setBigData interface {
+ // SetLayerBigData stores a (possibly large) chunk of named data
+ SetLayerBigData(id, key string, data io.Reader) error
+}
+
+// writeCache writes a cache for the layer ID.
+// It generates a sorted list of digests with their offset to the path location and offset.
+// The same cache is used to lookup files, chunks and candidates for deduplication with hard links.
+// There are 3 kinds of digests stored:
+// - digest(file.payload)
+// - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs)
+// - digest(i) for each i in chunks(file payload)
+func writeCache(manifest []byte, id string, dest setBigData) (*metadata, error) {
+ var vdata bytes.Buffer
+ tagLen := 0
+ digestLen := 0
+ var tagsBuffer bytes.Buffer
+
+ toc, err := prepareMetadata(manifest)
+ if err != nil {
+ return nil, err
+ }
+
+ var tags []string
+ for _, k := range toc {
+ if k.Digest != "" {
+ location := generateFileLocation(k.Name, 0)
+
+ off := uint64(vdata.Len())
+ l := uint64(len(location))
+
+ d := generateTag(k.Digest, off, l)
+ if tagLen == 0 {
+ tagLen = len(d)
+ }
+ if tagLen != len(d) {
+ return nil, errors.New("digest with different length found")
+ }
+ tags = append(tags, d)
+
+ fp, err := calculateHardLinkFingerprint(k)
+ if err != nil {
+ return nil, err
+ }
+ d = generateTag(fp, off, l)
+ if tagLen != len(d) {
+ return nil, errors.New("digest with different length found")
+ }
+ tags = append(tags, d)
+
+ if _, err := vdata.Write(location); err != nil {
+ return nil, err
+ }
+
+ digestLen = len(k.Digest)
+ }
+ if k.ChunkDigest != "" {
+ location := generateFileLocation(k.Name, uint64(k.ChunkOffset))
+ off := uint64(vdata.Len())
+ l := uint64(len(location))
+ d := generateTag(k.ChunkDigest, off, l)
+ if tagLen == 0 {
+ tagLen = len(d)
+ }
+ if tagLen != len(d) {
+ return nil, errors.New("digest with different length found")
+ }
+ tags = append(tags, d)
+
+ if _, err := vdata.Write(location); err != nil {
+ return nil, err
+ }
+ digestLen = len(k.ChunkDigest)
+ }
+ }
+
+ sort.Strings(tags)
+
+ for _, t := range tags {
+ if _, err := tagsBuffer.Write([]byte(t)); err != nil {
+ return nil, err
+ }
+ }
+
+ pipeReader, pipeWriter := io.Pipe()
+ errChan := make(chan error, 1)
+ go func() {
+ defer pipeWriter.Close()
+ defer close(errChan)
+
+ // version
+ if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(cacheVersion)); err != nil {
+ errChan <- err
+ return
+ }
+
+ // len of a tag
+ if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagLen)); err != nil {
+ errChan <- err
+ return
+ }
+
+ // len of a digest
+ if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(digestLen)); err != nil {
+ errChan <- err
+ return
+ }
+
+ // tags length
+ if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagsBuffer.Len())); err != nil {
+ errChan <- err
+ return
+ }
+
+ // vdata length
+ if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(vdata.Len())); err != nil {
+ errChan <- err
+ return
+ }
+
+ // tags
+ if _, err := pipeWriter.Write(tagsBuffer.Bytes()); err != nil {
+ errChan <- err
+ return
+ }
+
+ // variable length data
+ if _, err := pipeWriter.Write(vdata.Bytes()); err != nil {
+ errChan <- err
+ return
+ }
+
+ errChan <- nil
+ }()
+ defer pipeReader.Close()
+
+ counter := ioutils.NewWriteCounter(ioutil.Discard)
+
+ r := io.TeeReader(pipeReader, counter)
+
+ if err := dest.SetLayerBigData(id, cacheKey, r); err != nil {
+ return nil, err
+ }
+
+ if err := <-errChan; err != nil {
+ return nil, err
+ }
+
+ logrus.Debugf("Written lookaside cache for layer %q with length %v", id, counter.Count)
+
+ return &metadata{
+ digestLen: digestLen,
+ tagLen: tagLen,
+ tags: tagsBuffer.Bytes(),
+ vdata: vdata.Bytes(),
+ }, nil
+}
+
+func readMetadataFromCache(bigData io.Reader) (*metadata, error) {
+ var version, tagLen, digestLen, tagsLen, vdataLen uint64
+ if err := binary.Read(bigData, binary.LittleEndian, &version); err != nil {
+ return nil, err
+ }
+ if version != cacheVersion {
+ return nil, nil
+ }
+ if err := binary.Read(bigData, binary.LittleEndian, &tagLen); err != nil {
+ return nil, err
+ }
+ if err := binary.Read(bigData, binary.LittleEndian, &digestLen); err != nil {
+ return nil, err
+ }
+ if err := binary.Read(bigData, binary.LittleEndian, &tagsLen); err != nil {
+ return nil, err
+ }
+ if err := binary.Read(bigData, binary.LittleEndian, &vdataLen); err != nil {
+ return nil, err
+ }
+
+ tags := make([]byte, tagsLen)
+ if _, err := bigData.Read(tags); err != nil {
+ return nil, err
+ }
+
+ vdata := make([]byte, vdataLen)
+ if _, err := bigData.Read(vdata); err != nil {
+ return nil, err
+ }
+
+ return &metadata{
+ tagLen: int(tagLen),
+ digestLen: int(digestLen),
+ tags: tags,
+ vdata: vdata,
+ }, nil
+}
+
+func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) {
+ toc, err := unmarshalToc(manifest)
+ if err != nil {
+ // ignore errors here. They might be caused by a different manifest format.
+ return nil, nil
+ }
+
+ var r []*internal.FileMetadata
+ chunkSeen := make(map[string]bool)
+ for i := range toc.Entries {
+ d := toc.Entries[i].Digest
+ if d != "" {
+ r = append(r, &toc.Entries[i])
+ continue
+ }
+
+ // chunks do not use hard link dedup so keeping just one candidate is enough
+ cd := toc.Entries[i].ChunkDigest
+ if cd != "" && !chunkSeen[cd] {
+ r = append(r, &toc.Entries[i])
+ chunkSeen[cd] = true
+ }
+ }
+ return r, nil
+}
+
+func (c *layersCache) addLayer(id string, metadata *metadata) error {
+ target, err := c.store.DifferTarget(id)
+ if err != nil {
+ return fmt.Errorf("get checkout directory layer %q: %w", id, err)
+ }
+
+ l := layer{
+ id: id,
+ metadata: metadata,
+ target: target,
+ }
+ c.layers = append(c.layers, l)
+ return nil
+}
+
+func byteSliceAsString(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+func findTag(digest string, metadata *metadata) (string, uint64, uint64) {
+ if len(digest) != metadata.digestLen {
+ return "", 0, 0
+ }
+
+ nElements := len(metadata.tags) / metadata.tagLen
+
+ i := sort.Search(nElements, func(i int) bool {
+ d := byteSliceAsString(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+metadata.digestLen])
+ return strings.Compare(d, digest) >= 0
+ })
+ if i < nElements {
+ d := string(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+len(digest)])
+ if digest == d {
+ startOff := i*metadata.tagLen + metadata.digestLen
+ parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@")
+ off, _ := strconv.ParseInt(parts[0], 10, 64)
+ len, _ := strconv.ParseInt(parts[1], 10, 64)
+ return digest, uint64(off), uint64(len)
+ }
+ }
+ return "", 0, 0
+}
+
+func (c *layersCache) findDigestInternal(digest string) (string, string, int64, error) {
+ if digest == "" {
+ return "", "", -1, nil
+ }
+
+ c.mutex.RLock()
+ defer c.mutex.RUnlock()
+
+ for _, layer := range c.layers {
+ digest, off, len := findTag(digest, layer.metadata)
+ if digest != "" {
+ position := string(layer.metadata.vdata[off : off+len])
+ parts := strings.SplitN(position, "@", 2)
+ offFile, _ := strconv.ParseInt(parts[0], 10, 64)
+ return layer.target, parts[1], offFile, nil
+ }
+ }
+
+ return "", "", -1, nil
+}
+
+// findFileInOtherLayers finds the specified file in other layers.
+// file is the file to look for.
+func (c *layersCache) findFileInOtherLayers(file *internal.FileMetadata, useHardLinks bool) (string, string, error) {
+ digest := file.Digest
+ if useHardLinks {
+ var err error
+ digest, err = calculateHardLinkFingerprint(file)
+ if err != nil {
+ return "", "", err
+ }
+ }
+ target, name, off, err := c.findDigestInternal(digest)
+ if off == 0 {
+ return target, name, err
+ }
+ return "", "", nil
+}
+
+func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) {
+ return c.findDigestInternal(chunk.ChunkDigest)
+}
+
+func unmarshalToc(manifest []byte) (*internal.TOC, error) {
+ var buf bytes.Buffer
+ count := 0
+ var toc internal.TOC
+
+ iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
+ for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+ if field != "entries" {
+ iter.Skip()
+ continue
+ }
+ for iter.ReadArray() {
+ for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+ switch field {
+ case "type", "name", "linkName", "digest", "chunkDigest", "chunkType":
+ count += len(iter.ReadStringAsSlice())
+ case "xattrs":
+ for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
+ count += len(iter.ReadStringAsSlice())
+ }
+ default:
+ iter.Skip()
+ }
+ }
+ }
+ break
+ }
+
+ buf.Grow(count)
+
+ getString := func(b []byte) string {
+ from := buf.Len()
+ buf.Write(b)
+ to := buf.Len()
+ return byteSliceAsString(buf.Bytes()[from:to])
+ }
+
+ iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
+ for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+ if field == "version" {
+ toc.Version = iter.ReadInt()
+ continue
+ }
+ if field != "entries" {
+ iter.Skip()
+ continue
+ }
+ for iter.ReadArray() {
+ var m internal.FileMetadata
+ for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+ switch field {
+ case "type":
+ m.Type = getString(iter.ReadStringAsSlice())
+ case "name":
+ m.Name = getString(iter.ReadStringAsSlice())
+ case "linkName":
+ m.Linkname = getString(iter.ReadStringAsSlice())
+ case "mode":
+ m.Mode = iter.ReadInt64()
+ case "size":
+ m.Size = iter.ReadInt64()
+ case "UID":
+ m.UID = iter.ReadInt()
+ case "GID":
+ m.GID = iter.ReadInt()
+ case "ModTime":
+ time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
+ if err != nil {
+ return nil, err
+ }
+ m.ModTime = &time
+ case "accesstime":
+ time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
+ if err != nil {
+ return nil, err
+ }
+ m.AccessTime = &time
+ case "changetime":
+ time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
+ if err != nil {
+ return nil, err
+ }
+ m.ChangeTime = &time
+ case "devMajor":
+ m.Devmajor = iter.ReadInt64()
+ case "devMinor":
+ m.Devminor = iter.ReadInt64()
+ case "digest":
+ m.Digest = getString(iter.ReadStringAsSlice())
+ case "offset":
+ m.Offset = iter.ReadInt64()
+ case "endOffset":
+ m.EndOffset = iter.ReadInt64()
+ case "chunkSize":
+ m.ChunkSize = iter.ReadInt64()
+ case "chunkOffset":
+ m.ChunkOffset = iter.ReadInt64()
+ case "chunkDigest":
+ m.ChunkDigest = getString(iter.ReadStringAsSlice())
+ case "chunkType":
+ m.ChunkType = getString(iter.ReadStringAsSlice())
+ case "xattrs":
+ m.Xattrs = make(map[string]string)
+ for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
+ value := iter.ReadStringAsSlice()
+ m.Xattrs[key] = getString(value)
+ }
+ default:
+ iter.Skip()
+ }
+ }
+ toc.Entries = append(toc.Entries, m)
+ }
+ break
+ }
+ toc.StringsBuf = buf
+ return &toc, nil
+}
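
unmarshalToc above deliberately parses the manifest twice: the first pass only measures how many bytes of string data the entries carry so that a single bytes.Buffer can be grown once, and the second pass copies every string into that shared buffer instead of allocating one small string per field. A minimal, hypothetical sketch of the same sizing pattern outside the TOC types (internString and the sample fields are invented for the illustration; the vendored code also avoids the final copy through its unsafe byteSliceAsString helper):

package main

import (
	"bytes"
	"fmt"
)

// internString copies b into the shared buffer and returns it as a string
// (the vendored code instead returns a zero-copy view via byteSliceAsString).
func internString(buf *bytes.Buffer, b []byte) string {
	from := buf.Len()
	buf.Write(b)
	return string(buf.Bytes()[from:buf.Len()])
}

func main() {
	fields := [][]byte{
		[]byte("usr/bin/true"),
		[]byte("reg"),
		[]byte("sha256:0123"),
	}

	// First pass: count the bytes so the buffer grows exactly once.
	total := 0
	for _, f := range fields {
		total += len(f)
	}

	var buf bytes.Buffer
	buf.Grow(total)

	// Second pass: copy each field into the shared buffer.
	interned := make([]string, 0, len(fields))
	for _, f := range fields {
		interned = append(interned, internString(&buf, f))
	}
	fmt.Println(interned, buf.Len())
}
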
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
index 092cf584a..aeb7cfd4f 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
@@ -5,6 +5,7 @@ package compressor
// larger software like the graph drivers.
import (
+ "bufio"
"encoding/base64"
"io"
"io/ioutil"
@@ -15,6 +16,189 @@ import (
"github.com/vbatts/tar-split/archive/tar"
)
+const RollsumBits = 16
+const holesThreshold = int64(1 << 10)
+
+type holesFinder struct {
+ reader *bufio.Reader
+ fileOff int64
+ zeros int64
+ from int64
+ threshold int64
+
+ state int
+}
+
+const (
+ holesFinderStateRead = iota
+ holesFinderStateAccumulate
+ holesFinderStateFound
+ holesFinderStateEOF
+)
+
+// ReadByte reads a single byte from the underlying reader.
+// If a single byte is read, the return value is (0, RAW-BYTE-VALUE, nil).
+// If there are at least f.threshold consecutive zeros, then the
+// return value is (N_CONSECUTIVE_ZEROS, '\x00', nil).
+func (f *holesFinder) ReadByte() (int64, byte, error) {
+ for {
+ switch f.state {
+ // reading the file stream
+ case holesFinderStateRead:
+ if f.zeros > 0 {
+ f.zeros--
+ return 0, 0, nil
+ }
+ b, err := f.reader.ReadByte()
+ if err != nil {
+ return 0, b, err
+ }
+
+ if b != 0 {
+ return 0, b, err
+ }
+
+ f.zeros = 1
+ if f.zeros == f.threshold {
+ f.state = holesFinderStateFound
+ } else {
+ f.state = holesFinderStateAccumulate
+ }
+ // accumulating zeros, but still didn't reach the threshold
+ case holesFinderStateAccumulate:
+ b, err := f.reader.ReadByte()
+ if err != nil {
+ if err == io.EOF {
+ f.state = holesFinderStateEOF
+ continue
+ }
+ return 0, b, err
+ }
+
+ if b == 0 {
+ f.zeros++
+ if f.zeros == f.threshold {
+ f.state = holesFinderStateFound
+ }
+ } else {
+ if err := f.reader.UnreadByte(); err != nil {
+ return 0, 0, err
+ }
+ f.state = holesFinderStateRead
+ }
+ // found a hole. Number of zeros >= threshold
+ case holesFinderStateFound:
+ b, err := f.reader.ReadByte()
+ if err != nil {
+ if err == io.EOF {
+ f.state = holesFinderStateEOF
+ }
+ holeLen := f.zeros
+ f.zeros = 0
+ return holeLen, 0, nil
+ }
+ if b != 0 {
+ if err := f.reader.UnreadByte(); err != nil {
+ return 0, 0, err
+ }
+ f.state = holesFinderStateRead
+
+ holeLen := f.zeros
+ f.zeros = 0
+ return holeLen, 0, nil
+ }
+ f.zeros++
+ // reached EOF. Flush pending zeros if any.
+ case holesFinderStateEOF:
+ if f.zeros > 0 {
+ f.zeros--
+ return 0, 0, nil
+ }
+ return 0, 0, io.EOF
+ }
+ }
+}
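
ReadByte is a four-state machine: ordinary bytes come back one at a time, runs of zeros shorter than the threshold are replayed byte by byte, and only a run of at least threshold zeros is reported as a single (holeLen, 0, nil) result. A rough sketch of a caller, assuming it sits in the same package as the type above (imports bufio, bytes, fmt, io assumed; the 4-byte threshold and sample data are only for the illustration):

func dumpHoles() error {
	data := append(append([]byte("abc"), make([]byte, 8)...), 'd')
	hf := &holesFinder{
		threshold: 4, // the compressor itself uses holesThreshold (1 KiB)
		reader:    bufio.NewReader(bytes.NewReader(data)),
	}
	for {
		holeLen, b, err := hf.ReadByte()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		if holeLen > 0 {
			fmt.Printf("hole of %d zeros\n", holeLen) // printed once, with 8
			continue
		}
		fmt.Printf("data byte %q\n", b) // 'a', 'b', 'c', then 'd'
	}
}
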
+
+type rollingChecksumReader struct {
+ reader *holesFinder
+ closed bool
+ rollsum *RollSum
+ pendingHole int64
+
+ // WrittenOut is the total number of bytes read from
+ // the stream.
+ WrittenOut int64
+
+ // IsLastChunkZeros tells whether the last generated
+ // chunk is a hole (made of consecutive zeros). If it
+ // is false, then the last chunk is a data chunk
+ // generated by the rolling checksum.
+ IsLastChunkZeros bool
+}
+
+func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) {
+ rc.IsLastChunkZeros = false
+
+ if rc.pendingHole > 0 {
+ toCopy := int64(len(b))
+ if rc.pendingHole < toCopy {
+ toCopy = rc.pendingHole
+ }
+ rc.pendingHole -= toCopy
+ for i := int64(0); i < toCopy; i++ {
+ b[i] = 0
+ }
+
+ rc.WrittenOut += toCopy
+
+ rc.IsLastChunkZeros = true
+
+ // if there are no other zeros left, terminate the chunk
+ return rc.pendingHole == 0, int(toCopy), nil
+ }
+
+ if rc.closed {
+ return false, 0, io.EOF
+ }
+
+ for i := 0; i < len(b); i++ {
+ holeLen, n, err := rc.reader.ReadByte()
+ if err != nil {
+ if err == io.EOF {
+ rc.closed = true
+ if i == 0 {
+ return false, 0, err
+ }
+ return false, i, nil
+ }
+ // Report any other error type
+ return false, -1, err
+ }
+ if holeLen > 0 {
+ for j := int64(0); j < holeLen; j++ {
+ rc.rollsum.Roll(0)
+ }
+ rc.pendingHole = holeLen
+ return true, i, nil
+ }
+ b[i] = n
+ rc.WrittenOut++
+ rc.rollsum.Roll(n)
+ if rc.rollsum.OnSplitWithBits(RollsumBits) {
+ return true, i + 1, nil
+ }
+ }
+ return false, len(b), nil
+}
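
Read intentionally departs from the io.Reader contract: it returns (mustSplit, n, err), where mustSplit asks the caller to close the current chunk, either because the rolling checksum hit a split point or because a hole begins. A hedged sketch of the consumption loop that writeZstdChunkedStream builds on, again assuming the surrounding package (buffer size and the logging are illustrative):

func splitStream(src io.Reader) error {
	hf := &holesFinder{threshold: holesThreshold, reader: bufio.NewReader(src)}
	rc := &rollingChecksumReader{reader: hf, rollsum: NewRollSum()}

	buf := make([]byte, 4096)
	chunkStart := int64(0)
	for {
		mustSplit, n, err := rc.Read(buf)
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		_ = buf[:n] // compress/digest the chunk payload here
		if mustSplit {
			if rc.WrittenOut > chunkStart {
				fmt.Printf("chunk [%d, %d) zeros=%v\n",
					chunkStart, rc.WrittenOut, rc.IsLastChunkZeros)
			}
			chunkStart = rc.WrittenOut
		}
	}
	if rc.WrittenOut > chunkStart {
		fmt.Printf("final chunk [%d, %d)\n", chunkStart, rc.WrittenOut)
	}
	return nil
}
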
+
+type chunk struct {
+ ChunkOffset int64
+ Offset int64
+ Checksum string
+ ChunkSize int64
+ ChunkType string
+}
+
func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
// total written so far. Used to retrieve partial offsets in the file
dest := ioutils.NewWriteCounter(destFile)
@@ -64,40 +248,78 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
if _, err := zstdWriter.Write(rawBytes); err != nil {
return err
}
- payloadDigester := digest.Canonical.Digester()
- payloadChecksum := payloadDigester.Hash()
- payloadDest := io.MultiWriter(payloadChecksum, zstdWriter)
+ payloadDigester := digest.Canonical.Digester()
+ chunkDigester := digest.Canonical.Digester()
// Now handle the payload, if any
- var startOffset, endOffset int64
+ startOffset := int64(0)
+ lastOffset := int64(0)
+ lastChunkOffset := int64(0)
+
checksum := ""
+
+ chunks := []chunk{}
+
+ hf := &holesFinder{
+ threshold: holesThreshold,
+ reader: bufio.NewReader(tr),
+ }
+
+ rcReader := &rollingChecksumReader{
+ reader: hf,
+ rollsum: NewRollSum(),
+ }
+
+ payloadDest := io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
for {
- read, errRead := tr.Read(buf)
+ mustSplit, read, errRead := rcReader.Read(buf)
if errRead != nil && errRead != io.EOF {
return err
}
-
- // restart the compression only if there is
- // a payload.
+ // restart the compression only if there is a payload.
if read > 0 {
if startOffset == 0 {
startOffset, err = restartCompression()
if err != nil {
return err
}
+ lastOffset = startOffset
+ }
+
+ if _, err := payloadDest.Write(buf[:read]); err != nil {
+ return err
}
- _, err := payloadDest.Write(buf[:read])
+ }
+ if (mustSplit || errRead == io.EOF) && startOffset > 0 {
+ off, err := restartCompression()
if err != nil {
return err
}
+
+ chunkSize := rcReader.WrittenOut - lastChunkOffset
+ if chunkSize > 0 {
+ chunkType := internal.ChunkTypeData
+ if rcReader.IsLastChunkZeros {
+ chunkType = internal.ChunkTypeZeros
+ }
+
+ chunks = append(chunks, chunk{
+ ChunkOffset: lastChunkOffset,
+ Offset: lastOffset,
+ Checksum: chunkDigester.Digest().String(),
+ ChunkSize: chunkSize,
+ ChunkType: chunkType,
+ })
+ }
+
+ lastOffset = off
+ lastChunkOffset = rcReader.WrittenOut
+ chunkDigester = digest.Canonical.Digester()
+ payloadDest = io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
}
if errRead == io.EOF {
if startOffset > 0 {
- endOffset, err = restartCompression()
- if err != nil {
- return err
- }
checksum = payloadDigester.Digest().String()
}
break
@@ -112,30 +334,42 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
for k, v := range hdr.Xattrs {
xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v))
}
- m := internal.FileMetadata{
- Type: typ,
- Name: hdr.Name,
- Linkname: hdr.Linkname,
- Mode: hdr.Mode,
- Size: hdr.Size,
- UID: hdr.Uid,
- GID: hdr.Gid,
- ModTime: hdr.ModTime,
- AccessTime: hdr.AccessTime,
- ChangeTime: hdr.ChangeTime,
- Devmajor: hdr.Devmajor,
- Devminor: hdr.Devminor,
- Xattrs: xattrs,
- Digest: checksum,
- Offset: startOffset,
- EndOffset: endOffset,
-
- // ChunkSize is 0 for the last chunk
- ChunkSize: 0,
- ChunkOffset: 0,
- ChunkDigest: checksum,
- }
- metadata = append(metadata, m)
+ entries := []internal.FileMetadata{
+ {
+ Type: typ,
+ Name: hdr.Name,
+ Linkname: hdr.Linkname,
+ Mode: hdr.Mode,
+ Size: hdr.Size,
+ UID: hdr.Uid,
+ GID: hdr.Gid,
+ ModTime: &hdr.ModTime,
+ AccessTime: &hdr.AccessTime,
+ ChangeTime: &hdr.ChangeTime,
+ Devmajor: hdr.Devmajor,
+ Devminor: hdr.Devminor,
+ Xattrs: xattrs,
+ Digest: checksum,
+ Offset: startOffset,
+ EndOffset: lastOffset,
+ },
+ }
+ for i := 1; i < len(chunks); i++ {
+ entries = append(entries, internal.FileMetadata{
+ Type: internal.TypeChunk,
+ Name: hdr.Name,
+ ChunkOffset: chunks[i].ChunkOffset,
+ })
+ }
+ if len(chunks) > 1 {
+ for i := range chunks {
+ entries[i].ChunkSize = chunks[i].ChunkSize
+ entries[i].Offset = chunks[i].Offset
+ entries[i].ChunkDigest = chunks[i].Checksum
+ entries[i].ChunkType = chunks[i].ChunkType
+ }
+ }
+ metadata = append(metadata, entries...)
}
rawBytes := tr.RawBytes()
@@ -212,7 +446,7 @@ func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level
// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
if level == nil {
- l := 3
+ l := 10 // raise the default zstd level from 3 to favor smaller chunked layers
level = &l
}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go
new file mode 100644
index 000000000..f4dfad822
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2011 The Perkeep Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package rollsum implements rolling checksums similar to apenwarr's bup, which
+// is similar to librsync.
+//
+// The bup project is at https://github.com/apenwarr/bup and its splitting in
+// particular is at https://github.com/apenwarr/bup/blob/master/lib/bup/bupsplit.c
+package compressor
+
+import (
+ "math/bits"
+)
+
+const windowSize = 64 // Roll assumes windowSize is a power of 2
+const charOffset = 31
+
+const blobBits = 13
+const blobSize = 1 << blobBits // 8k
+
+type RollSum struct {
+ s1, s2 uint32
+ window [windowSize]uint8
+ wofs int
+}
+
+func NewRollSum() *RollSum {
+ return &RollSum{
+ s1: windowSize * charOffset,
+ s2: windowSize * (windowSize - 1) * charOffset,
+ }
+}
+
+func (rs *RollSum) add(drop, add uint32) {
+ s1 := rs.s1 + add - drop
+ rs.s1 = s1
+ rs.s2 += s1 - uint32(windowSize)*(drop+charOffset)
+}
+
+// Roll adds ch to the rolling sum.
+func (rs *RollSum) Roll(ch byte) {
+ wp := &rs.window[rs.wofs]
+ rs.add(uint32(*wp), uint32(ch))
+ *wp = ch
+ rs.wofs = (rs.wofs + 1) & (windowSize - 1)
+}
+
+// OnSplit reports whether at least 13 consecutive trailing bits of
+// the current checksum are set the same way.
+func (rs *RollSum) OnSplit() bool {
+ return (rs.s2 & (blobSize - 1)) == ((^0) & (blobSize - 1))
+}
+
+// OnSplitWithBits reports whether at least n consecutive trailing bits
+// of the current checksum are set the same way.
+func (rs *RollSum) OnSplitWithBits(n uint32) bool {
+ mask := (uint32(1) << n) - 1
+ return rs.s2&mask == (^uint32(0))&mask
+}
+
+func (rs *RollSum) Bits() int {
+ rsum := rs.Digest() >> (blobBits + 1)
+ return blobBits + bits.TrailingZeros32(^rsum)
+}
+
+func (rs *RollSum) Digest() uint32 {
+ return (rs.s1 << 16) | (rs.s2 & 0xffff)
+}
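
The rolling checksum is what makes the chunk boundaries content-defined: OnSplitWithBits(n) fires when the low n bits of the sum are all ones, which happens on average once every 2^n input bytes, so boundaries follow the data rather than fixed offsets. A small sketch of finding split points in a byte slice (same package assumed; the input is arbitrary):

func splitPoints(data []byte) []int {
	rs := NewRollSum()
	var splits []int
	for i, b := range data {
		rs.Roll(b)
		// With RollsumBits == 16 the expected chunk size is ~64 KiB.
		if rs.OnSplitWithBits(RollsumBits) {
			splits = append(splits, i+1)
		}
	}
	return splits
}
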
diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
index c91c43d85..3bb5286d9 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go
@@ -8,11 +8,11 @@ import (
"archive/tar"
"bytes"
"encoding/binary"
- "encoding/json"
"fmt"
"io"
"time"
+ jsoniter "github.com/json-iterator/go"
"github.com/klauspost/compress/zstd"
"github.com/opencontainers/go-digest"
)
@@ -20,6 +20,9 @@ import (
type TOC struct {
Version int `json:"version"`
Entries []FileMetadata `json:"entries"`
+
+ // internal: used by unmarshalToc
+ StringsBuf bytes.Buffer `json:"-"`
}
type FileMetadata struct {
@@ -27,26 +30,34 @@ type FileMetadata struct {
Name string `json:"name"`
Linkname string `json:"linkName,omitempty"`
Mode int64 `json:"mode,omitempty"`
- Size int64 `json:"size"`
- UID int `json:"uid"`
- GID int `json:"gid"`
- ModTime time.Time `json:"modtime"`
- AccessTime time.Time `json:"accesstime"`
- ChangeTime time.Time `json:"changetime"`
- Devmajor int64 `json:"devMajor"`
- Devminor int64 `json:"devMinor"`
+ Size int64 `json:"size,omitempty"`
+ UID int `json:"uid,omitempty"`
+ GID int `json:"gid,omitempty"`
+ ModTime *time.Time `json:"modtime,omitempty"`
+ AccessTime *time.Time `json:"accesstime,omitempty"`
+ ChangeTime *time.Time `json:"changetime,omitempty"`
+ Devmajor int64 `json:"devMajor,omitempty"`
+ Devminor int64 `json:"devMinor,omitempty"`
Xattrs map[string]string `json:"xattrs,omitempty"`
Digest string `json:"digest,omitempty"`
Offset int64 `json:"offset,omitempty"`
EndOffset int64 `json:"endOffset,omitempty"`
- // Currently chunking is not supported.
ChunkSize int64 `json:"chunkSize,omitempty"`
ChunkOffset int64 `json:"chunkOffset,omitempty"`
ChunkDigest string `json:"chunkDigest,omitempty"`
+ ChunkType string `json:"chunkType,omitempty"`
+
+ // internal: computed by mergeTOCEntries.
+ Chunks []*FileMetadata `json:"-"`
}
const (
+ ChunkTypeData = ""
+ ChunkTypeZeros = "zeros"
+)
+
+const (
TypeReg = "reg"
TypeChunk = "chunk"
TypeLink = "hardlink"
@@ -123,6 +134,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
Entries: metadata,
}
+ var json = jsoniter.ConfigCompatibleWithStandardLibrary
// Generate the manifest
manifest, err := json.Marshal(toc)
if err != nil {
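
With the new chunk fields, a regular file that the compressor split is represented in the TOC as one "reg" entry followed by one "chunk" entry per additional chunk, all sharing the file name; the first chunk's offsets and digest live on the "reg" entry itself, and mergeTocEntries later reassembles them through the unexported Chunks field. A hand-written illustration of that shape, written as if from a package importing the internal package above (all offsets, sizes and digests are invented):

entries := []internal.FileMetadata{
	{
		Type:        internal.TypeReg,
		Name:        "usr/bin/foo",
		Size:        200000,
		Digest:      "sha256:<whole-file-digest>",
		Offset:      1000, // compressed offset of chunk #0
		EndOffset:   4000, // end of the last compressed chunk
		ChunkOffset: 0,
		ChunkSize:   131072,
		ChunkDigest: "sha256:<chunk-0-digest>",
		ChunkType:   internal.ChunkTypeData,
	},
	{
		Type:        internal.TypeChunk,
		Name:        "usr/bin/foo",
		Offset:      2500, // compressed offset of chunk #1
		ChunkOffset: 131072,
		ChunkSize:   68928,
		ChunkDigest: "sha256:<chunk-1-digest>",
		ChunkType:   internal.ChunkTypeZeros,
	},
}
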
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
index 52d21d689..92b15c2bf 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
@@ -4,8 +4,8 @@ import (
archivetar "archive/tar"
"context"
"encoding/base64"
- "encoding/json"
"fmt"
+ "hash"
"io"
"io/ioutil"
"os"
@@ -13,6 +13,7 @@ import (
"reflect"
"sort"
"strings"
+ "sync"
"sync/atomic"
"syscall"
"time"
@@ -43,28 +44,35 @@ const (
bigDataKey = "zstd-chunked-manifest"
fileTypeZstdChunked = iota
- fileTypeEstargz = iota
+ fileTypeEstargz
+ fileTypeNoCompression
+ fileTypeHole
+
+ copyGoRoutines = 32
)
type compressedFileType int
type chunkedDiffer struct {
- stream ImageSourceSeekable
- manifest []byte
- layersMetadata map[string][]internal.FileMetadata
- layersTarget map[string]string
- tocOffset int64
- fileType compressedFileType
+ stream ImageSourceSeekable
+ manifest []byte
+ layersCache *layersCache
+ tocOffset int64
+ fileType compressedFileType
+
+ copyBuffer []byte
gzipReader *pgzip.Reader
+ zstdReader *zstd.Decoder
+ rawReader io.Reader
}
var xattrsToIgnore = map[string]interface{}{
"security.selinux": true,
}
-func timeToTimespec(time time.Time) (ts unix.Timespec) {
- if time.IsZero() {
+func timeToTimespec(time *time.Time) (ts unix.Timespec) {
+ if time == nil || time.IsZero() {
// Return UTIME_OMIT special value
ts.Sec = 0
ts.Nsec = ((1 << 30) - 2)
@@ -128,54 +136,6 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us
return dstFile, st.Size(), nil
}
-func prepareOtherLayersCache(layersMetadata map[string][]internal.FileMetadata) map[string]map[string][]*internal.FileMetadata {
- maps := make(map[string]map[string][]*internal.FileMetadata)
-
- for layerID, v := range layersMetadata {
- r := make(map[string][]*internal.FileMetadata)
- for i := range v {
- if v[i].Digest != "" {
- r[v[i].Digest] = append(r[v[i].Digest], &v[i])
- }
- }
- maps[layerID] = r
- }
- return maps
-}
-
-func getLayersCache(store storage.Store) (map[string][]internal.FileMetadata, map[string]string, error) {
- allLayers, err := store.Layers()
- if err != nil {
- return nil, nil, err
- }
-
- layersMetadata := make(map[string][]internal.FileMetadata)
- layersTarget := make(map[string]string)
- for _, r := range allLayers {
- manifestReader, err := store.LayerBigData(r.ID, bigDataKey)
- if err != nil {
- continue
- }
- defer manifestReader.Close()
- manifest, err := ioutil.ReadAll(manifestReader)
- if err != nil {
- return nil, nil, fmt.Errorf("open manifest file for layer %q: %w", r.ID, err)
- }
- var toc internal.TOC
- if err := json.Unmarshal(manifest, &toc); err != nil {
- continue
- }
- layersMetadata[r.ID] = toc.Entries
- target, err := store.DifferTarget(r.ID)
- if err != nil {
- return nil, nil, fmt.Errorf("get checkout directory layer %q: %w", r.ID, err)
- }
- layersTarget[r.ID] = target
- }
-
- return layersMetadata, layersTarget, nil
-}
-
// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
if _, ok := annotations[internal.ManifestChecksumKey]; ok {
@@ -192,18 +152,18 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
if err != nil {
return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
}
- layersMetadata, layersTarget, err := getLayersCache(store)
+ layersCache, err := getLayersCache(store)
if err != nil {
return nil, err
}
return &chunkedDiffer{
- stream: iss,
- manifest: manifest,
- layersMetadata: layersMetadata,
- layersTarget: layersTarget,
- tocOffset: tocOffset,
- fileType: fileTypeZstdChunked,
+ copyBuffer: makeCopyBuffer(),
+ stream: iss,
+ manifest: manifest,
+ layersCache: layersCache,
+ tocOffset: tocOffset,
+ fileType: fileTypeZstdChunked,
}, nil
}
@@ -212,37 +172,41 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize
if err != nil {
return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
}
- layersMetadata, layersTarget, err := getLayersCache(store)
+ layersCache, err := getLayersCache(store)
if err != nil {
return nil, err
}
return &chunkedDiffer{
- stream: iss,
- manifest: manifest,
- layersMetadata: layersMetadata,
- layersTarget: layersTarget,
- tocOffset: tocOffset,
- fileType: fileTypeEstargz,
+ copyBuffer: makeCopyBuffer(),
+ stream: iss,
+ manifest: manifest,
+ layersCache: layersCache,
+ tocOffset: tocOffset,
+ fileType: fileTypeEstargz,
}, nil
}
+func makeCopyBuffer() []byte {
+ return make([]byte, 2<<20)
+}
+
// copyFileFromOtherLayer copies a file from another layer
// file is the file to look for.
// source is the path to the source layer checkout.
-// otherFile contains the metadata for the file.
+// name is the path to the file to copy in source.
// dirfd is an open file descriptor to the destination root directory.
// useHardLinks defines whether the deduplication can be performed using hard links.
-func copyFileFromOtherLayer(file *internal.FileMetadata, source string, otherFile *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
+func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0)
if err != nil {
- return false, nil, 0, fmt.Errorf("open source file %q: %w", source, err)
+ return false, nil, 0, fmt.Errorf("open source file: %w", err)
}
defer unix.Close(srcDirfd)
- srcFile, err := openFileUnderRoot(otherFile.Name, srcDirfd, unix.O_RDONLY, 0)
+ srcFile, err := openFileUnderRoot(name, srcDirfd, unix.O_RDONLY, 0)
if err != nil {
- return false, nil, 0, fmt.Errorf("open source file %q under target rootfs: %w", otherFile.Name, err)
+ return false, nil, 0, fmt.Errorf("open source file under target rootfs: %w", err)
}
defer srcFile.Close()
@@ -308,45 +272,9 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo
return canDedupMetadataWithHardLink(file, &otherFile)
}
-// findFileInOtherLayers finds the specified file in other layers.
-// file is the file to look for.
-// dirfd is an open file descriptor to the checkout root directory.
-// layersMetadata contains the metadata for each layer in the storage.
-// layersTarget maps each layer to its checkout on disk.
-// useHardLinks defines whether the deduplication can be performed using hard links.
-func findFileInOtherLayers(file *internal.FileMetadata, dirfd int, layersMetadata map[string]map[string][]*internal.FileMetadata, layersTarget map[string]string, useHardLinks bool) (bool, *os.File, int64, error) {
- // this is ugly, needs to be indexed
- for layerID, checksums := range layersMetadata {
- source, ok := layersTarget[layerID]
- if !ok {
- continue
- }
- files, found := checksums[file.Digest]
- if !found {
- continue
- }
- for _, candidate := range files {
- // check if it is a valid candidate to dedup file
- if useHardLinks && !canDedupMetadataWithHardLink(file, candidate) {
- continue
- }
-
- found, dstFile, written, err := copyFileFromOtherLayer(file, source, candidate, dirfd, useHardLinks)
- if found && err == nil {
- return found, dstFile, written, err
- }
- }
- }
- // If hard links deduplication was used and it has failed, try again without hard links.
- if useHardLinks {
- return findFileInOtherLayers(file, dirfd, layersMetadata, layersTarget, false)
- }
- return false, nil, 0, nil
-}
-
-func getFileDigest(f *os.File) (digest.Digest, error) {
+func getFileDigest(f *os.File, buf []byte) (digest.Digest, error) {
digester := digest.Canonical.Digester()
- if _, err := io.Copy(digester.Hash(), f); err != nil {
+ if _, err := io.CopyBuffer(digester.Hash(), f, buf); err != nil {
return "", err
}
return digester.Digest(), nil
@@ -408,7 +336,7 @@ func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, di
// file is the file to look for.
// dirfd is an open fd to the destination checkout.
// useHardLinks defines whether the deduplication can be performed using hard links.
-func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
+func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool, buf []byte) (bool, *os.File, int64, error) {
sourceFile := filepath.Clean(filepath.Join("/", file.Name))
if !strings.HasPrefix(sourceFile, "/usr/") {
// limit host deduplication to files under /usr.
@@ -437,7 +365,7 @@ func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool
return false, nil, 0, err
}
- checksum, err := getFileDigest(f)
+ checksum, err := getFileDigest(f, buf)
if err != nil {
return false, nil, 0, err
}
@@ -459,7 +387,7 @@ func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool
dstFile.Close()
return false, nil, 0, err
}
- checksum, err = getFileDigest(f)
+ checksum, err = getFileDigest(f, buf)
if err != nil {
dstFile.Close()
return false, nil, 0, err
@@ -471,6 +399,19 @@ func findFileOnTheHost(file *internal.FileMetadata, dirfd int, useHardLinks bool
return true, dstFile, written, nil
}
+// findFileInOtherLayers finds the specified file in other layers.
+// cache is the layers cache to use.
+// file is the file to look for.
+// dirfd is an open file descriptor to the checkout root directory.
+// useHardLinks defines whether the deduplication can be performed using hard links.
+func findFileInOtherLayers(cache *layersCache, file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) {
+ target, name, err := cache.findFileInOtherLayers(file, useHardLinks)
+ if err != nil || name == "" {
+ return false, nil, 0, err
+ }
+ return copyFileFromOtherLayer(file, target, name, dirfd, useHardLinks)
+}
+
func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOptions) error {
if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 {
return nil
@@ -497,18 +438,46 @@ func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOption
return nil
}
-type missingFile struct {
- File *internal.FileMetadata
+type originFile struct {
+ Root string
+ Path string
+ Offset int64
+}
+
+type missingFileChunk struct {
Gap int64
+ Hole bool
+
+ File *internal.FileMetadata
+
+ CompressedSize int64
+ UncompressedSize int64
}
-func (m missingFile) Length() int64 {
- return m.File.EndOffset - m.File.Offset
+type missingPart struct {
+ Hole bool
+ SourceChunk *ImageSourceChunk
+ OriginFile *originFile
+ Chunks []missingFileChunk
}
-type missingChunk struct {
- RawChunk ImageSourceChunk
- Files []missingFile
+func (o *originFile) OpenFile() (io.ReadCloser, error) {
+ srcDirfd, err := unix.Open(o.Root, unix.O_RDONLY, 0)
+ if err != nil {
+ return nil, fmt.Errorf("open source file: %w", err)
+ }
+ defer unix.Close(srcDirfd)
+
+ srcFile, err := openFileUnderRoot(o.Path, srcDirfd, unix.O_RDONLY, 0)
+ if err != nil {
+ return nil, fmt.Errorf("open source file under target rootfs: %w", err)
+ }
+
+ if _, err := srcFile.Seek(o.Offset, 0); err != nil {
+ srcFile.Close()
+ return nil, err
+ }
+ return srcFile, nil
}
// setFileAttrs sets the file attributes for file given metadata
@@ -711,7 +680,7 @@ func openFileUnderRoot(name string, dirfd int, flags uint64, mode os.FileMode) (
newDirfd, err2 := openOrCreateDirUnderRoot(parent, dirfd, 0)
if err2 == nil {
defer newDirfd.Close()
- fd, err := openFileUnderRootRaw(dirfd, name, flags, mode)
+ fd, err := openFileUnderRootRaw(int(newDirfd.Fd()), filepath.Base(name), flags, mode)
if err == nil {
return os.NewFile(uintptr(fd), name), nil
}
@@ -755,159 +724,367 @@ func openOrCreateDirUnderRoot(name string, dirfd int, mode os.FileMode) (*os.Fil
return nil, err
}
-func (c *chunkedDiffer) createFileFromCompressedStream(dest string, dirfd int, reader io.Reader, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) (err error) {
- file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0)
- if err != nil {
- return err
+func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressedFileType, from io.Reader, mf *missingFileChunk) (compressedFileType, error) {
+ switch {
+ case partCompression == fileTypeHole:
+ // The entire part is a hole. There is no need to read from a file.
+ c.rawReader = nil
+ return fileTypeHole, nil
+ case mf.Hole:
+ // Only the missing chunk in the requested part refers to a hole.
+ // The received data must be discarded.
+ limitReader := io.LimitReader(from, mf.CompressedSize)
+ _, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer)
+ return fileTypeHole, err
+ case partCompression == fileTypeZstdChunked:
+ c.rawReader = io.LimitReader(from, mf.CompressedSize)
+ if c.zstdReader == nil {
+ r, err := zstd.NewReader(c.rawReader)
+ if err != nil {
+ return partCompression, err
+ }
+ c.zstdReader = r
+ } else {
+ if err := c.zstdReader.Reset(c.rawReader); err != nil {
+ return partCompression, err
+ }
+ }
+ case partCompression == fileTypeEstargz:
+ c.rawReader = io.LimitReader(from, mf.CompressedSize)
+ if c.gzipReader == nil {
+ r, err := pgzip.NewReader(c.rawReader)
+ if err != nil {
+ return partCompression, err
+ }
+ c.gzipReader = r
+ } else {
+ if err := c.gzipReader.Reset(c.rawReader); err != nil {
+ return partCompression, err
+ }
+ }
+ case partCompression == fileTypeNoCompression:
+ c.rawReader = io.LimitReader(from, mf.UncompressedSize)
+ default:
+ return partCompression, fmt.Errorf("unknown file type %q", c.fileType)
}
- defer func() {
- err2 := file.Close()
- if err == nil {
- err = err2
+ return partCompression, nil
+}
+
+// hashHole writes SIZE zeros to the specified hasher
+func hashHole(h hash.Hash, size int64, copyBuffer []byte) error {
+ count := int64(len(copyBuffer))
+ if size < count {
+ count = size
+ }
+ for i := int64(0); i < count; i++ {
+ copyBuffer[i] = 0
+ }
+ for size > 0 {
+ count = int64(len(copyBuffer))
+ if size < count {
+ count = size
}
- }()
+ if _, err := h.Write(copyBuffer[:count]); err != nil {
+ return err
+ }
+ size -= count
+ }
+ return nil
+}
- digester := digest.Canonical.Digester()
- checksum := digester.Hash()
- to := io.MultiWriter(file, checksum)
+// appendHole creates a hole with the specified size at the open fd.
+func appendHole(fd int, size int64) error {
+ off, err := unix.Seek(fd, size, unix.SEEK_CUR)
+ if err != nil {
+ return err
+ }
+ // Make sure the file size is changed. It might be the last hole and no other data is written afterwards.
+ if err := unix.Ftruncate(fd, off); err != nil {
+ return err
+ }
+ return nil
+}
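
appendHole never writes the zeros: seeking forward leaves an unallocated range in the destination file, and the Ftruncate extends the file size in case the hole is the last thing in the file and nothing else is written after it. A hedged usage sketch, assuming the same package (the path is illustrative):

func writeSparseExample() error {
	f, err := os.Create("/tmp/sparse-example")
	if err != nil {
		return err
	}
	defer f.Close()

	if _, err := f.Write([]byte("header")); err != nil {
		return err
	}
	// 1 MiB hole: on filesystems with sparse-file support no blocks are
	// allocated for this range, it simply reads back as zeros.
	if err := appendHole(int(f.Fd()), 1<<20); err != nil {
		return err
	}
	_, err = f.Write([]byte("trailer"))
	return err
}
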
- switch c.fileType {
+func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileType, destFile *destinationFile, size int64) error {
+ switch compression {
case fileTypeZstdChunked:
- z, err := zstd.NewReader(reader)
- if err != nil {
+ defer c.zstdReader.Reset(nil)
+ if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.zstdReader, size), c.copyBuffer); err != nil {
return err
}
- defer z.Close()
-
- if _, err := io.Copy(to, io.LimitReader(z, metadata.Size)); err != nil {
+ case fileTypeEstargz:
+ defer c.gzipReader.Close()
+ if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.gzipReader, size), c.copyBuffer); err != nil {
return err
}
- if _, err := io.Copy(ioutil.Discard, reader); err != nil {
+ case fileTypeNoCompression:
+ if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.rawReader, size), c.copyBuffer); err != nil {
return err
}
- case fileTypeEstargz:
- if c.gzipReader == nil {
- r, err := pgzip.NewReader(reader)
- if err != nil {
- return err
- }
- c.gzipReader = r
- } else {
- if err := c.gzipReader.Reset(reader); err != nil {
- return err
- }
- }
- defer c.gzipReader.Close()
-
- if _, err := io.Copy(to, io.LimitReader(c.gzipReader, metadata.Size)); err != nil {
+ case fileTypeHole:
+ if err := appendHole(int(destFile.file.Fd()), size); err != nil {
return err
}
- if _, err := io.Copy(ioutil.Discard, reader); err != nil {
+ if err := hashHole(destFile.hash, size, c.copyBuffer); err != nil {
return err
}
default:
return fmt.Errorf("unknown file type %q", c.fileType)
}
+ return nil
+}
+
+type destinationFile struct {
+ dirfd int
+ file *os.File
+ digester digest.Digester
+ hash hash.Hash
+ to io.Writer
+ metadata *internal.FileMetadata
+ options *archive.TarOptions
+}
+
+func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions) (*destinationFile, error) {
+ file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ digester := digest.Canonical.Digester()
+ hash := digester.Hash()
+ to := io.MultiWriter(file, hash)
+
+ return &destinationFile{
+ file: file,
+ digester: digester,
+ hash: hash,
+ to: to,
+ metadata: metadata,
+ options: options,
+ dirfd: dirfd,
+ }, nil
+}
- manifestChecksum, err := digest.Parse(metadata.Digest)
+func (d *destinationFile) Close() error {
+ manifestChecksum, err := digest.Parse(d.metadata.Digest)
if err != nil {
return err
}
- if digester.Digest() != manifestChecksum {
- return fmt.Errorf("checksum mismatch for %q", dest)
+ if d.digester.Digest() != manifestChecksum {
+ return fmt.Errorf("checksum mismatch for %q (got %q instead of %q)", d.file.Name(), d.digester.Digest(), manifestChecksum)
}
- return setFileAttrs(dirfd, file, mode, metadata, options, false)
+
+ return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false)
}
-func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingChunks []missingChunk, options *archive.TarOptions) error {
- for mc := 0; ; mc++ {
- var part io.ReadCloser
- select {
- case p := <-streams:
- part = p
- case err := <-errs:
- return err
- }
- if part == nil {
- if mc == len(missingChunks) {
- break
+func closeDestinationFiles(files chan *destinationFile, errors chan error) {
+ for f := range files {
+ errors <- f.Close()
+ }
+ close(errors)
+}
+
+func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) {
+ var destFile *destinationFile
+
+ filesToClose := make(chan *destinationFile, 3)
+ closeFilesErrors := make(chan error, 2)
+
+ go closeDestinationFiles(filesToClose, closeFilesErrors)
+ defer func() {
+ close(filesToClose)
+ for e := range closeFilesErrors {
+ if e != nil && Err == nil {
+ Err = e
}
- return errors.Errorf("invalid stream returned")
}
- if mc == len(missingChunks) {
- part.Close()
- return errors.Errorf("too many chunks returned")
+ }()
+
+ for _, missingPart := range missingParts {
+ var part io.ReadCloser
+ partCompression := c.fileType
+ switch {
+ case missingPart.Hole:
+ partCompression = fileTypeHole
+ case missingPart.OriginFile != nil:
+ var err error
+ part, err = missingPart.OriginFile.OpenFile()
+ if err != nil {
+ return err
+ }
+ partCompression = fileTypeNoCompression
+ case missingPart.SourceChunk != nil:
+ select {
+ case p := <-streams:
+ part = p
+ case err := <-errs:
+ return err
+ }
+ if part == nil {
+ return errors.Errorf("invalid stream returned")
+ }
+ default:
+ return errors.Errorf("internal error: missing part misses both local and remote data stream")
}
- for _, mf := range missingChunks[mc].Files {
+ for _, mf := range missingPart.Chunks {
if mf.Gap > 0 {
limitReader := io.LimitReader(part, mf.Gap)
- _, err := io.Copy(ioutil.Discard, limitReader)
+ _, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer)
if err != nil {
- part.Close()
- return err
+ Err = err
+ goto exit
}
continue
}
- limitReader := io.LimitReader(part, mf.Length())
+ if mf.File.Name == "" {
+ Err = errors.Errorf("file name empty")
+ goto exit
+ }
- if err := c.createFileFromCompressedStream(dest, dirfd, limitReader, os.FileMode(mf.File.Mode), mf.File, options); err != nil {
- part.Close()
- return err
+ compression, err := c.prepareCompressedStreamToFile(partCompression, part, &mf)
+ if err != nil {
+ Err = err
+ goto exit
+ }
+
+ // Open the new file if it is different from the one that is
+ // already opened
+ if destFile == nil || destFile.metadata.Name != mf.File.Name {
+ var err error
+ if destFile != nil {
+ cleanup:
+ for {
+ select {
+ case err = <-closeFilesErrors:
+ if err != nil {
+ Err = err
+ goto exit
+ }
+ default:
+ break cleanup
+ }
+ }
+ filesToClose <- destFile
+ }
+ destFile, err = openDestinationFile(dirfd, mf.File, options)
+ if err != nil {
+ Err = err
+ goto exit
+ }
+ }
+
+ if err := c.appendCompressedStreamToFile(compression, destFile, mf.UncompressedSize); err != nil {
+ Err = err
+ goto exit
+ }
+ if c.rawReader != nil {
+ if _, err := io.CopyBuffer(ioutil.Discard, c.rawReader, c.copyBuffer); err != nil {
+ Err = err
+ goto exit
+ }
+ }
+ }
+ exit:
+ if part != nil {
+ part.Close()
+ if Err != nil {
+ break
}
}
- part.Close()
}
+
+ if destFile != nil {
+ return destFile.Close()
+ }
+
return nil
}
-func mergeMissingChunks(missingChunks []missingChunk, target int) []missingChunk {
- if len(missingChunks) <= target {
- return missingChunks
+func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
+ getGap := func(missingParts []missingPart, i int) int {
+ prev := missingParts[i-1].SourceChunk.Offset + missingParts[i-1].SourceChunk.Length
+ return int(missingParts[i].SourceChunk.Offset - prev)
+ }
+ getCost := func(missingParts []missingPart, i int) int {
+ cost := getGap(missingParts, i)
+ if missingParts[i-1].OriginFile != nil {
+ cost += int(missingParts[i-1].SourceChunk.Length)
+ }
+ if missingParts[i].OriginFile != nil {
+ cost += int(missingParts[i].SourceChunk.Length)
+ }
+ return cost
+ }
+
+ // simple case: merge chunks from the same file.
+ newMissingParts := missingParts[0:1]
+ prevIndex := 0
+ for i := 1; i < len(missingParts); i++ {
+ gap := getGap(missingParts, i)
+ if gap == 0 && missingParts[prevIndex].OriginFile == nil &&
+ missingParts[i].OriginFile == nil &&
+ !missingParts[prevIndex].Hole && !missingParts[i].Hole &&
+ len(missingParts[prevIndex].Chunks) == 1 && len(missingParts[i].Chunks) == 1 &&
+ missingParts[prevIndex].Chunks[0].File.Name == missingParts[i].Chunks[0].File.Name {
+ missingParts[prevIndex].SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length
+ missingParts[prevIndex].Chunks[0].CompressedSize += missingParts[i].Chunks[0].CompressedSize
+ missingParts[prevIndex].Chunks[0].UncompressedSize += missingParts[i].Chunks[0].UncompressedSize
+ } else {
+ newMissingParts = append(newMissingParts, missingParts[i])
+ prevIndex++
+ }
}
+ missingParts = newMissingParts
- getGap := func(missingChunks []missingChunk, i int) int {
- prev := missingChunks[i-1].RawChunk.Offset + missingChunks[i-1].RawChunk.Length
- return int(missingChunks[i].RawChunk.Offset - prev)
+ if len(missingParts) <= target {
+ return missingParts
}
// this implementation doesn't account for duplicates, so it could merge
// more than necessary to reach the specified target. Since target itself
// is a heuristic value, it doesn't matter.
- var gaps []int
- for i := 1; i < len(missingChunks); i++ {
- gaps = append(gaps, getGap(missingChunks, i))
+ costs := make([]int, len(missingParts)-1)
+ for i := 1; i < len(missingParts); i++ {
+ costs[i-1] = getCost(missingParts, i)
}
- sort.Ints(gaps)
+ sort.Ints(costs)
- toShrink := len(missingChunks) - target
- targetValue := gaps[toShrink-1]
+ toShrink := len(missingParts) - target
+ if toShrink >= len(costs) {
+ toShrink = len(costs) - 1
+ }
+ targetValue := costs[toShrink]
- newMissingChunks := missingChunks[0:1]
- for i := 1; i < len(missingChunks); i++ {
- gap := getGap(missingChunks, i)
- if gap > targetValue {
- newMissingChunks = append(newMissingChunks, missingChunks[i])
+ newMissingParts = missingParts[0:1]
+ for i := 1; i < len(missingParts); i++ {
+ if getCost(missingParts, i) > targetValue {
+ newMissingParts = append(newMissingParts, missingParts[i])
} else {
- prev := &newMissingChunks[len(newMissingChunks)-1]
- prev.RawChunk.Length += uint64(gap) + missingChunks[i].RawChunk.Length
+ gap := getGap(missingParts, i)
+ prev := &newMissingParts[len(newMissingParts)-1]
+ prev.SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length
+ prev.Hole = false
+ prev.OriginFile = nil
if gap > 0 {
- gapFile := missingFile{
+ gapFile := missingFileChunk{
Gap: int64(gap),
}
- prev.Files = append(prev.Files, gapFile)
+ prev.Chunks = append(prev.Chunks, gapFile)
}
- prev.Files = append(prev.Files, missingChunks[i].Files...)
+ prev.Chunks = append(prev.Chunks, missingParts[i].Chunks...)
}
}
- return newMissingChunks
+ return newMissingParts
}
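
mergeMissingChunks first coalesces adjacent ranges that belong to the same file, and only then merges the cheapest remaining gaps until at most target HTTP ranges are left; a merge that would force re-downloading data already available locally (an OriginFile) is costed accordingly. A rough illustration in the same package (all offsets and names invented):

parts := []missingPart{
	{
		SourceChunk: &ImageSourceChunk{Offset: 0, Length: 100},
		Chunks:      []missingFileChunk{{File: &internal.FileMetadata{Name: "a"}}},
	},
	{
		SourceChunk: &ImageSourceChunk{Offset: 100, Length: 50},
		Chunks:      []missingFileChunk{{File: &internal.FileMetadata{Name: "a"}}},
	},
	{
		SourceChunk: &ImageSourceChunk{Offset: 4000, Length: 10},
		Chunks:      []missingFileChunk{{File: &internal.FileMetadata{Name: "b"}}},
	},
}
merged := mergeMissingChunks(parts, 2)
// The first two ranges are contiguous and belong to the same file, so they
// collapse into one request covering offsets [0, 150); "b" stays separate,
// giving len(merged) == 2.
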
-func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingChunks []missingChunk, options *archive.TarOptions) error {
+func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error {
var chunksToRequest []ImageSourceChunk
- for _, c := range missingChunks {
- chunksToRequest = append(chunksToRequest, c.RawChunk)
+ for _, c := range missingParts {
+ if c.OriginFile == nil && !c.Hole {
+ chunksToRequest = append(chunksToRequest, *c.SourceChunk)
+ }
}
// There are some missing files. Prepare a multirange request for the missing chunks.
@@ -921,20 +1098,20 @@ func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingChun
}
if _, ok := err.(ErrBadRequest); ok {
- requested := len(missingChunks)
+ requested := len(missingParts)
// If the server cannot handle at least 64 chunks in a single request, just give up.
if requested < 64 {
return err
}
// Merge more chunks to request
- missingChunks = mergeMissingChunks(missingChunks, requested/2)
+ missingParts = mergeMissingChunks(missingParts, requested/2)
continue
}
return err
}
- if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingChunks, options); err != nil {
+ if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingParts, options); err != nil {
return err
}
return nil
@@ -960,7 +1137,7 @@ func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *internal.File
}
}
- file, err := openFileUnderRoot(name, dirfd, unix.O_DIRECTORY|unix.O_RDONLY, 0)
+ file, err := openFileUnderRoot(base, parentFd, unix.O_DIRECTORY|unix.O_RDONLY, 0)
if err != nil {
return err
}
@@ -1109,7 +1286,69 @@ func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bo
return def
}
+type findAndCopyFileOptions struct {
+ useHardLinks bool
+ enableHostDedup bool
+ ostreeRepos []string
+ options *archive.TarOptions
+}
+
+func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) {
+ finalizeFile := func(dstFile *os.File) error {
+ if dstFile != nil {
+ defer dstFile.Close()
+ if err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ found, dstFile, _, err := findFileInOtherLayers(c.layersCache, r, dirfd, copyOptions.useHardLinks)
+ if err != nil {
+ return false, err
+ }
+ if found {
+ if err := finalizeFile(dstFile); err != nil {
+ return false, err
+ }
+ return true, nil
+ }
+
+ found, dstFile, _, err = findFileInOSTreeRepos(r, copyOptions.ostreeRepos, dirfd, copyOptions.useHardLinks)
+ if err != nil {
+ return false, err
+ }
+ if found {
+ if err := finalizeFile(dstFile); err != nil {
+ return false, err
+ }
+ return true, nil
+ }
+
+ if copyOptions.enableHostDedup {
+ found, dstFile, _, err = findFileOnTheHost(r, dirfd, copyOptions.useHardLinks, c.copyBuffer)
+ if err != nil {
+ return false, err
+ }
+ if found {
+ if err := finalizeFile(dstFile); err != nil {
+ return false, err
+ }
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) {
+ defer c.layersCache.release()
+ defer func() {
+ if c.zstdReader != nil {
+ c.zstdReader.Close()
+ }
+ }()
+
bigData := map[string][]byte{
bigDataKey: c.manifest,
}
@@ -1137,14 +1376,14 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
ostreeRepos := strings.Split(storeOpts.PullOptions["ostree_repos"], ":")
// Generate the manifest
- var toc internal.TOC
- if err := json.Unmarshal(c.manifest, &toc); err != nil {
+ toc, err := unmarshalToc(c.manifest)
+ if err != nil {
return output, err
}
whiteoutConverter := archive.GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
- var missingChunks []missingChunk
+ var missingParts []missingPart
mergedEntries, err := c.mergeTocEntries(c.fileType, toc.Entries)
if err != nil {
@@ -1170,13 +1409,57 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
}
defer unix.Close(dirfd)
- otherLayersCache := prepareOtherLayersCache(c.layersMetadata)
-
// hardlinks can point to missing files. So create them after all files
// are retrieved
var hardLinks []hardLinkToCreate
- missingChunksSize, totalChunksSize := int64(0), int64(0)
+ missingPartsSize, totalChunksSize := int64(0), int64(0)
+
+ copyOptions := findAndCopyFileOptions{
+ useHardLinks: useHardLinks,
+ enableHostDedup: enableHostDedup,
+ ostreeRepos: ostreeRepos,
+ options: options,
+ }
+
+ type copyFileJob struct {
+ njob int
+ index int
+ mode os.FileMode
+ metadata *internal.FileMetadata
+
+ found bool
+ err error
+ }
+
+ var wg sync.WaitGroup
+
+ copyResults := make([]copyFileJob, len(mergedEntries))
+
+ copyFileJobs := make(chan copyFileJob)
+ defer func() {
+ if copyFileJobs != nil {
+ close(copyFileJobs)
+ }
+ wg.Wait()
+ }()
+
+ for i := 0; i < copyGoRoutines; i++ {
+ wg.Add(1)
+ jobs := copyFileJobs
+
+ go func() {
+ defer wg.Done()
+ for job := range jobs {
+ found, err := c.findAndCopyFile(dirfd, job.metadata, &copyOptions, job.mode)
+ job.err = err
+ job.found = found
+ copyResults[job.njob] = job
+ }
+ }()
+ }
+
+ filesToWaitFor := 0
for i, r := range mergedEntries {
if options.ForceMask != nil {
value := fmt.Sprintf("%d:%d:0%o", r.UID, r.GID, r.Mode&07777)
@@ -1272,74 +1555,95 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
totalChunksSize += r.Size
- finalizeFile := func(dstFile *os.File) error {
- if dstFile != nil {
- defer dstFile.Close()
- if err := setFileAttrs(dirfd, dstFile, mode, &r, options, false); err != nil {
- return err
- }
+ if t == tar.TypeReg {
+ index := i
+ njob := filesToWaitFor
+ job := copyFileJob{
+ mode: mode,
+ metadata: &mergedEntries[index],
+ index: index,
+ njob: njob,
}
- return nil
+ copyFileJobs <- job
+ filesToWaitFor++
}
+ }
- found, dstFile, _, err := findFileInOtherLayers(&r, dirfd, otherLayersCache, c.layersTarget, useHardLinks)
- if err != nil {
- return output, err
- }
- if found {
- if err := finalizeFile(dstFile); err != nil {
- return output, err
- }
- continue
- }
+ close(copyFileJobs)
+ copyFileJobs = nil
- found, dstFile, _, err = findFileInOSTreeRepos(&r, ostreeRepos, dirfd, useHardLinks)
- if err != nil {
- return output, err
+ wg.Wait()
+
+ for _, res := range copyResults[:filesToWaitFor] {
+ if res.err != nil {
+ return output, res.err
}
- if found {
- if err := finalizeFile(dstFile); err != nil {
- return output, err
- }
+ // the file was already copied to its destination
+ // so there is nothing left to do.
+ if res.found {
continue
}
- if enableHostDedup {
- found, dstFile, _, err = findFileOnTheHost(&r, dirfd, useHardLinks)
- if err != nil {
- return output, err
- }
- if found {
- if err := finalizeFile(dstFile); err != nil {
- return output, err
- }
- continue
+ r := &mergedEntries[res.index]
+
+ missingPartsSize += r.Size
+
+ remainingSize := r.Size
+
+ // the file is missing, attempt to find individual chunks.
+ for _, chunk := range r.Chunks {
+ compressedSize := int64(chunk.EndOffset - chunk.Offset)
+ size := remainingSize
+ if chunk.ChunkSize > 0 {
+ size = chunk.ChunkSize
}
- }
+ remainingSize = remainingSize - size
- missingChunksSize += r.Size
- if t == tar.TypeReg {
rawChunk := ImageSourceChunk{
- Offset: uint64(r.Offset),
- Length: uint64(r.EndOffset - r.Offset),
+ Offset: uint64(chunk.Offset),
+ Length: uint64(compressedSize),
}
-
- file := missingFile{
- File: &mergedEntries[i],
+ file := missingFileChunk{
+ File: &mergedEntries[res.index],
+ CompressedSize: compressedSize,
+ UncompressedSize: size,
}
-
- missingChunks = append(missingChunks, missingChunk{
- RawChunk: rawChunk,
- Files: []missingFile{
+ mp := missingPart{
+ SourceChunk: &rawChunk,
+ Chunks: []missingFileChunk{
file,
},
- })
+ }
+
+ switch chunk.ChunkType {
+ case internal.ChunkTypeData:
+ root, path, offset, err := c.layersCache.findChunkInOtherLayers(chunk)
+ if err != nil {
+ return output, err
+ }
+ if offset >= 0 && validateChunkChecksum(chunk, root, path, offset, c.copyBuffer) {
+ missingPartsSize -= size
+ mp.OriginFile = &originFile{
+ Root: root,
+ Path: path,
+ Offset: offset,
+ }
+ }
+ case internal.ChunkTypeZeros:
+ missingPartsSize -= size
+ mp.Hole = true
+ // Mark all chunks belonging to the missing part as holes
+ for i := range mp.Chunks {
+ mp.Chunks[i].Hole = true
+ }
+ }
+ missingParts = append(missingParts, mp)
}
}
// There are some missing files. Prepare a multirange request for the missing chunks.
- if len(missingChunks) > 0 {
- missingChunks = mergeMissingChunks(missingChunks, maxNumberMissingChunks)
- if err := c.retrieveMissingFiles(dest, dirfd, missingChunks, options); err != nil {
+ if len(missingParts) > 0 {
+ missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks)
+ if err := c.retrieveMissingFiles(dest, dirfd, missingParts, options); err != nil {
return output, err
}
}
@@ -1351,31 +1655,69 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
}
if totalChunksSize > 0 {
- logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingChunksSize, totalChunksSize, float32(missingChunksSize*100.0)/float32(totalChunksSize))
+ logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize))
}
return output, nil
}
+func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool {
+ // ignore the metadata files for the estargz format.
+ if fileType != fileTypeEstargz {
+ return false
+ }
+ switch e.Name {
+ case estargz.PrefetchLandmark, estargz.NoPrefetchLandmark, estargz.TOCTarName:
+ return true
+ }
+ return false
+}
+
func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]internal.FileMetadata, error) {
- var mergedEntries []internal.FileMetadata
- var prevEntry *internal.FileMetadata
- for _, entry := range entries {
- e := entry
+ countNextChunks := func(start int) int {
+ count := 0
+ for _, e := range entries[start:] {
+ if e.Type != TypeChunk {
+ return count
+ }
+ count++
+ }
+ return count
+ }
- // ignore the metadata files for the estargz format.
- if fileType == fileTypeEstargz && (e.Name == estargz.PrefetchLandmark || e.Name == estargz.NoPrefetchLandmark || e.Name == estargz.TOCTarName) {
+ size := 0
+ for _, entry := range entries {
+ if mustSkipFile(fileType, entry) {
continue
}
+ if entry.Type != TypeChunk {
+ size++
+ }
+ }
+ mergedEntries := make([]internal.FileMetadata, size)
+ m := 0
+ for i := 0; i < len(entries); i++ {
+ e := entries[i]
+ if mustSkipFile(fileType, e) {
+ continue
+ }
if e.Type == TypeChunk {
- if prevEntry == nil || prevEntry.Type != TypeReg {
- return nil, errors.New("chunk type without a regular file")
+ return nil, fmt.Errorf("chunk type without a regular file")
+ }
+
+ if e.Type == TypeReg {
+ nChunks := countNextChunks(i + 1)
+
+ e.Chunks = make([]*internal.FileMetadata, nChunks+1)
+ for j := 0; j <= nChunks; j++ {
+ e.Chunks[j] = &entries[i+j]
+ e.EndOffset = entries[i+j].EndOffset
}
- prevEntry.EndOffset = e.EndOffset
- continue
+ i += nChunks
}
- mergedEntries = append(mergedEntries, e)
- prevEntry = &e
+ mergedEntries[m] = e
+ m++
}
// stargz/estargz doesn't store EndOffset so let's calculate it here
lastOffset := c.tocOffset
@@ -1386,6 +1728,47 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i
if mergedEntries[i].Offset != 0 {
lastOffset = mergedEntries[i].Offset
}
+
+ lastChunkOffset := mergedEntries[i].EndOffset
+ for j := len(mergedEntries[i].Chunks) - 1; j >= 0; j-- {
+ mergedEntries[i].Chunks[j].EndOffset = lastChunkOffset
+ mergedEntries[i].Chunks[j].Size = mergedEntries[i].Chunks[j].EndOffset - mergedEntries[i].Chunks[j].Offset
+ lastChunkOffset = mergedEntries[i].Chunks[j].Offset
+ }
}
return mergedEntries, nil
}
+
+// validateChunkChecksum checks whether the chunk.ChunkSize bytes in the file
+// at $root/$path, starting at offset, have the same digest as chunk.ChunkDigest
+func validateChunkChecksum(chunk *internal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool {
+ parentDirfd, err := unix.Open(root, unix.O_PATH, 0)
+ if err != nil {
+ return false
+ }
+ defer unix.Close(parentDirfd)
+
+ fd, err := openFileUnderRoot(path, parentDirfd, unix.O_RDONLY, 0)
+ if err != nil {
+ return false
+ }
+ defer fd.Close()
+
+ if _, err := unix.Seek(int(fd.Fd()), offset, 0); err != nil {
+ return false
+ }
+
+ r := io.LimitReader(fd, chunk.ChunkSize)
+ digester := digest.Canonical.Digester()
+
+ if _, err := io.CopyBuffer(digester.Hash(), r, copyBuffer); err != nil {
+ return false
+ }
+
+ digest, err := digest.Parse(chunk.ChunkDigest)
+ if err != nil {
+ return false
+ }
+
+ return digester.Digest() == digest
+}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
index 83e797599..0abe886eb 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
@@ -82,7 +82,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
if len(uidMap) == 1 && uidMap[0].Size == 1 {
uid = uidMap[0].HostID
} else {
- uid, err = toHost(0, uidMap)
+ uid, err = RawToHost(0, uidMap)
if err != nil {
return -1, -1, err
}
@@ -90,7 +90,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
if len(gidMap) == 1 && gidMap[0].Size == 1 {
gid = gidMap[0].HostID
} else {
- gid, err = toHost(0, gidMap)
+ gid, err = RawToHost(0, gidMap)
if err != nil {
return -1, -1, err
}
@@ -98,10 +98,14 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
return uid, gid, nil
}
-// toContainer takes an id mapping, and uses it to translate a
-// host ID to the remapped ID. If no map is provided, then the translation
-// assumes a 1-to-1 mapping and returns the passed in id
-func toContainer(hostID int, idMap []IDMap) (int, error) {
+// RawToContainer takes an id mapping, and uses it to translate a host ID to
+// the remapped ID. If no map is provided, then the translation assumes a
+// 1-to-1 mapping and returns the passed in id.
+//
+// If you wish to map a (uid,gid) combination you should use the corresponding
+// IDMappings methods, which ensure that you are mapping the correct ID against
+// the correct mapping.
+func RawToContainer(hostID int, idMap []IDMap) (int, error) {
if idMap == nil {
return hostID, nil
}
@@ -114,10 +118,14 @@ func toContainer(hostID int, idMap []IDMap) (int, error) {
return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
}
-// toHost takes an id mapping and a remapped ID, and translates the
-// ID to the mapped host ID. If no map is provided, then the translation
-// assumes a 1-to-1 mapping and returns the passed in id #
-func toHost(contID int, idMap []IDMap) (int, error) {
+// RawToHost takes an id mapping and a remapped ID, and translates the ID to
+// the mapped host ID. If no map is provided, then the translation assumes a
+// 1-to-1 mapping and returns the passed in id.
+//
+// If you wish to map a (uid,gid) combination you should use the corresponding
+// IDMappings methods, which ensure that you are mapping the correct ID against
+// the correct mapping.
+func RawToHost(contID int, idMap []IDMap) (int, error) {
if idMap == nil {
return contID, nil
}
@@ -187,22 +195,22 @@ func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
var err error
var target IDPair
- target.UID, err = toHost(pair.UID, i.uids)
+ target.UID, err = RawToHost(pair.UID, i.uids)
if err != nil {
return target, err
}
- target.GID, err = toHost(pair.GID, i.gids)
+ target.GID, err = RawToHost(pair.GID, i.gids)
return target, err
}
// ToContainer returns the container UID and GID for the host uid and gid
func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
- uid, err := toContainer(pair.UID, i.uids)
+ uid, err := RawToContainer(pair.UID, i.uids)
if err != nil {
return -1, -1, err
}
- gid, err := toContainer(pair.GID, i.gids)
+ gid, err := RawToContainer(pair.GID, i.gids)
return uid, gid, err
}
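
Exporting RawToHost and RawToContainer lets callers translate a single ID through an explicit mapping without building an IDMappings pair, with the documented caveat that uid/gid combinations should still go through the IDMappings methods. A small usage sketch (the mapping values are invented):

func example() error {
	uidMap := []idtools.IDMap{
		{ContainerID: 0, HostID: 100000, Size: 65536},
	}

	hostUID, err := idtools.RawToHost(0, uidMap)
	if err != nil {
		return err
	}
	ctrUID, err := idtools.RawToContainer(100005, uidMap)
	if err != nil {
		return err
	}
	// hostUID == 100000 and ctrUID == 5 with the mapping above.
	fmt.Println(hostUID, ctrUID)
	return nil
}
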
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
index e444a1bcc..6e6e3b22b 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go
@@ -12,10 +12,14 @@ import (
#cgo LDFLAGS: -l subid
#include <shadow/subid.h>
#include <stdlib.h>
+#include <stdio.h>
const char *Prog = "storage";
+FILE *shadow_logfd = NULL;
+
struct subid_range get_range(struct subid_range *ranges, int i)
{
- return ranges[i];
+ shadow_logfd = stderr;
+ return ranges[i];
}
#if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4)
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index bf1cd4f38..062ce6fb7 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -647,17 +647,21 @@ func GetStore(options types.StoreOptions) (Store, error) {
storesLock.Lock()
defer storesLock.Unlock()
+ // Return only if BOTH the run root and the graph root match; otherwise our run root could be overridden if a store with a matching graph root is found first
for _, s := range stores {
- if s.graphRoot == options.GraphRoot && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) {
+ if (s.graphRoot == options.GraphRoot) && (s.runRoot == options.RunRoot) && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) {
return s, nil
}
}
- if options.GraphRoot == "" {
- return nil, errors.Wrap(ErrIncompleteOptions, "no storage root specified")
- }
- if options.RunRoot == "" {
- return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot specified")
+ // If passed a run-root or graph-root alone, default the other; only error if we have neither.
+ switch {
+ case options.RunRoot == "" && options.GraphRoot == "":
+ return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot or graphroot specified")
+ case options.GraphRoot == "":
+ options.GraphRoot = types.Options().GraphRoot
+ case options.RunRoot == "":
+ options.RunRoot = types.Options().RunRoot
}
if err := os.MkdirAll(options.RunRoot, 0700); err != nil {
@@ -1609,7 +1613,7 @@ func (s *store) ImageBigData(id, key string) ([]byte, error) {
}
}
if foundImage {
- return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for image with ID %q", key, id)
+ return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for image with ID %q (consider removing the image to resolve the issue)", key, id)
}
return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
}
@@ -2497,23 +2501,29 @@ func (s *store) DeleteContainer(id string) error {
gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
wg.Add(1)
go func() {
- var err error
- for attempts := 0; attempts < 50; attempts++ {
- err = os.RemoveAll(gcpath)
- if err == nil || !system.IsEBUSY(err) {
- break
- }
- time.Sleep(time.Millisecond * 100)
+ defer wg.Done()
+ // attempt a simple rm -rf first
+ err := os.RemoveAll(gcpath)
+ if err == nil {
+ errChan <- nil
+ return
}
- errChan <- err
- wg.Done()
+ // and if that fails, fall back to the more thorough cleanup
+ errChan <- system.EnsureRemoveAll(gcpath)
}()
rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
wg.Add(1)
go func() {
- errChan <- os.RemoveAll(rcpath)
- wg.Done()
+ defer wg.Done()
+ // attempt a simple rm -rf first
+ err := os.RemoveAll(rcpath)
+ if err == nil {
+ errChan <- nil
+ return
+ }
+ // and if that fails, fall back to the more thorough cleanup
+ errChan <- system.EnsureRemoveAll(rcpath)
}()
go func() {
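
For reference, a small sketch (not the vendored code) of the cleanup pattern the hunks above adopt: try a plain os.RemoveAll first and only fall back to a heavier cleanup when that fails. The real code uses system.EnsureRemoveAll from containers/storage; ensureRemoveAll below is a stand-in, and the paths are hypothetical.

```go
package main

import (
	"fmt"
	"os"
	"sync"
)

// ensureRemoveAll stands in for system.EnsureRemoveAll, which unmounts busy
// mounts before retrying; this stand-in simply retries the removal once.
func ensureRemoveAll(path string) error {
	return os.RemoveAll(path)
}

func removeWithFallback(path string) error {
	// attempt a simple rm -rf first
	if err := os.RemoveAll(path); err == nil {
		return nil
	}
	// and if that fails, fall back to the more thorough cleanup
	return ensureRemoveAll(path)
}

func main() {
	paths := []string{"/tmp/example-graph-dir", "/tmp/example-run-dir"} // hypothetical
	errChan := make(chan error, len(paths))
	var wg sync.WaitGroup
	for _, p := range paths {
		wg.Add(1)
		go func(p string) {
			defer wg.Done()
			errChan <- removeWithFallback(p)
		}(p)
	}
	wg.Wait()
	close(errChan)
	for err := range errChan {
		if err != nil {
			fmt.Println("cleanup error:", err)
		}
	}
}
```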
diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go
index 7586cd5ae..ad8377dab 100644
--- a/vendor/github.com/containers/storage/types/options.go
+++ b/vendor/github.com/containers/storage/types/options.go
@@ -17,7 +17,7 @@ import (
)
// TOML-friendly explicit tables used for conversions.
-type tomlConfig struct {
+type TomlConfig struct {
Storage struct {
Driver string `toml:"driver"`
RunRoot string `toml:"runroot"`
@@ -306,7 +306,7 @@ func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio
// ReloadConfigurationFile parses the specified configuration file and overrides
// the configuration in storeOptions.
func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
- config := new(tomlConfig)
+ config := new(TomlConfig)
meta, err := toml.DecodeFile(configFile, &config)
if err == nil {
@@ -321,7 +321,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
}
}
- // Clear storeOptions of previos settings
+ // Clear storeOptions of previous settings
*storeOptions = StoreOptions{}
if config.Storage.Driver != "" {
storeOptions.GraphDriverName = config.Storage.Driver
@@ -424,3 +424,38 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
func Options() StoreOptions {
return defaultStoreOptions
}
+
+// Save overwrites the TomlConfig in storage.conf with the given conf
+func Save(conf TomlConfig, rootless bool) error {
+ configFile, err := DefaultConfigFile(rootless)
+ if err != nil {
+ return err
+ }
+ if err = os.Remove(configFile); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ f, err := os.Create(configFile)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ return toml.NewEncoder(f).Encode(conf)
+}
+
+// StorageConfig is used to retrieve the storage.conf toml in order to overwrite it
+func StorageConfig(rootless bool) (*TomlConfig, error) {
+ config := new(TomlConfig)
+
+ configFile, err := DefaultConfigFile(rootless)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = toml.DecodeFile(configFile, &config)
+ if err != nil {
+ return nil, err
+ }
+
+ return config, nil
+}
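
A hedged usage sketch for the two new helpers, assuming a rootless setup and that they behave as documented (read the current storage.conf, tweak a field, write it back); this is illustrative only, not part of the vendored diff:

```go
package main

import (
	"fmt"

	"github.com/containers/storage/types"
)

func main() {
	conf, err := types.StorageConfig(true) // rootless
	if err != nil {
		fmt.Println("read storage.conf:", err)
		return
	}
	conf.Storage.Driver = "overlay" // adjust a setting before saving
	if err := types.Save(*conf, true); err != nil {
		fmt.Println("write storage.conf:", err)
	}
}
```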
diff --git a/vendor/github.com/docker/docker/opts/address_pools.go b/vendor/github.com/docker/docker/opts/address_pools.go
new file mode 100644
index 000000000..fa15c24b9
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/address_pools.go
@@ -0,0 +1,84 @@
+package opts
+
+import (
+ "encoding/csv"
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+
+ types "github.com/docker/libnetwork/ipamutils"
+)
+
+// PoolsOpt is a Value type for parsing the default address pools definitions
+type PoolsOpt struct {
+ Values []*types.NetworkToSplit
+}
+
+// UnmarshalJSON fills the Values structure from JSON input
+func (p *PoolsOpt) UnmarshalJSON(raw []byte) error {
+ return json.Unmarshal(raw, &(p.Values))
+}
+
+// Set predefined pools
+func (p *PoolsOpt) Set(value string) error {
+ csvReader := csv.NewReader(strings.NewReader(value))
+ fields, err := csvReader.Read()
+ if err != nil {
+ return err
+ }
+
+ poolsDef := types.NetworkToSplit{}
+
+ for _, field := range fields {
+ parts := strings.SplitN(field, "=", 2)
+ if len(parts) != 2 {
+ return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
+ }
+
+ key := strings.ToLower(parts[0])
+ value := strings.ToLower(parts[1])
+
+ switch key {
+ case "base":
+ poolsDef.Base = value
+ case "size":
+ size, err := strconv.Atoi(value)
+ if err != nil {
+ return fmt.Errorf("invalid size value: %q (must be integer): %v", value, err)
+ }
+ poolsDef.Size = size
+ default:
+ return fmt.Errorf("unexpected key '%s' in '%s'", key, field)
+ }
+ }
+
+ p.Values = append(p.Values, &poolsDef)
+
+ return nil
+}
+
+// Type returns the type of this option
+func (p *PoolsOpt) Type() string {
+ return "pool-options"
+}
+
+// String returns a string repr of this option
+func (p *PoolsOpt) String() string {
+ var pools []string
+ for _, pool := range p.Values {
+ repr := fmt.Sprintf("%s %d", pool.Base, pool.Size)
+ pools = append(pools, repr)
+ }
+ return strings.Join(pools, ", ")
+}
+
+// Value returns the mounts
+func (p *PoolsOpt) Value() []*types.NetworkToSplit {
+ return p.Values
+}
+
+// Name returns the flag name of this option
+func (p *PoolsOpt) Name() string {
+ return "default-address-pools"
+}
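
A short usage sketch of how a --default-address-pools value is parsed by PoolsOpt.Set (assumes the vendored import path; output formatting comes from PoolsOpt.String above):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	var pools opts.PoolsOpt
	if err := pools.Set("base=10.10.0.0/16,size=24"); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(pools.String()) // "10.10.0.0/16 24"
}
```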
diff --git a/vendor/github.com/docker/docker/opts/env.go b/vendor/github.com/docker/docker/opts/env.go
new file mode 100644
index 000000000..97e1a8c8a
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/env.go
@@ -0,0 +1,30 @@
+package opts // import "github.com/docker/docker/opts"
+
+import (
+ "os"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+// ValidateEnv validates an environment variable and returns it.
+// If no value is specified, it obtains its value from the current environment
+//
+// As with ParseEnvFile and related to #16585, environment variable names
+// are not validated whatsoever; it's up to the application inside docker
+// to validate them or not.
+//
+// The only validation here is to check if name is empty, per #25099
+func ValidateEnv(val string) (string, error) {
+ arr := strings.SplitN(val, "=", 2)
+ if arr[0] == "" {
+ return "", errors.New("invalid environment variable: " + val)
+ }
+ if len(arr) > 1 {
+ return val, nil
+ }
+ if envVal, ok := os.LookupEnv(arr[0]); ok {
+ return arr[0] + "=" + envVal, nil
+ }
+ return val, nil
+}
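
A quick illustration of ValidateEnv's behavior (a sketch assuming the vendored import path): a bare name is completed from the current environment when the variable is set, otherwise the value passes through unchanged.

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/opts"
)

func main() {
	os.Setenv("EDITOR", "vim")
	v, _ := opts.ValidateEnv("EDITOR") // -> "EDITOR=vim"
	fmt.Println(v)
	v, _ = opts.ValidateEnv("FOO=bar") // explicit value passes through
	fmt.Println(v)
}
```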
diff --git a/vendor/github.com/docker/docker/opts/hosts.go b/vendor/github.com/docker/docker/opts/hosts.go
new file mode 100644
index 000000000..a3123adef
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/hosts.go
@@ -0,0 +1,183 @@
+package opts // import "github.com/docker/docker/opts"
+
+import (
+ "fmt"
+ "net"
+ "net/url"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/docker/docker/pkg/homedir"
+)
+
+const (
+ // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. dockerd -H tcp://
+ // These are the IANA registered port numbers for use with Docker
+ // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
+ DefaultHTTPPort = 2375 // Default HTTP Port
+ // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled
+ DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port
+ // DefaultUnixSocket Path for the unix socket.
+ // Docker daemon by default always listens on the default unix socket
+ DefaultUnixSocket = "/var/run/docker.sock"
+ // DefaultTCPHost constant defines the default host string used by docker on Windows
+ DefaultTCPHost = "tcp://" + DefaultHTTPHost + ":2375"
+ // DefaultTLSHost constant defines the default host string used by docker for TLS sockets
+ DefaultTLSHost = "tcp://" + DefaultHTTPHost + ":2376"
+ // DefaultNamedPipe defines the default named pipe used by docker on Windows
+ DefaultNamedPipe = `//./pipe/docker_engine`
+ // HostGatewayName is the string value that can be passed
+ // to the IPAddr section in --add-host that is replaced by
+ // the value of HostGatewayIP daemon config value
+ HostGatewayName = "host-gateway"
+)
+
+// ValidateHost validates that the specified string is a valid host and returns it.
+func ValidateHost(val string) (string, error) {
+ host := strings.TrimSpace(val)
+ // The empty string means default and is not handled by parseDaemonHost
+ if host != "" {
+ _, err := parseDaemonHost(host)
+ if err != nil {
+ return val, err
+ }
+ }
+ // Note: unlike most flag validators, we don't return the mutated value here
+ // we need to know what the user entered later (using ParseHost) to adjust for TLS
+ return val, nil
+}
+
+// ParseHost and set defaults for a Daemon host string.
+// defaultToTLS is preferred over defaultToUnixXDG.
+func ParseHost(defaultToTLS, defaultToUnixXDG bool, val string) (string, error) {
+ host := strings.TrimSpace(val)
+ if host == "" {
+ if defaultToTLS {
+ host = DefaultTLSHost
+ } else if defaultToUnixXDG {
+ runtimeDir, err := homedir.GetRuntimeDir()
+ if err != nil {
+ return "", err
+ }
+ socket := filepath.Join(runtimeDir, "docker.sock")
+ host = "unix://" + socket
+ } else {
+ host = DefaultHost
+ }
+ } else {
+ var err error
+ host, err = parseDaemonHost(host)
+ if err != nil {
+ return val, err
+ }
+ }
+ return host, nil
+}
+
+// parseDaemonHost parses the specified address and returns an address that will be used as the host.
+// Depending on the address specified, this may return one of the global Default* strings defined in hosts.go.
+func parseDaemonHost(addr string) (string, error) {
+ addrParts := strings.SplitN(addr, "://", 2)
+ if len(addrParts) == 1 && addrParts[0] != "" {
+ addrParts = []string{"tcp", addrParts[0]}
+ }
+
+ switch addrParts[0] {
+ case "tcp":
+ return ParseTCPAddr(addrParts[1], DefaultTCPHost)
+ case "unix":
+ return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket)
+ case "npipe":
+ return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe)
+ case "fd":
+ return addr, nil
+ default:
+ return "", fmt.Errorf("Invalid bind address format: %s", addr)
+ }
+}
+
+// parseSimpleProtoAddr parses and validates that the specified address is a valid
+// socket address for simple protocols like unix and npipe. It returns a formatted
+// socket address, either using the address parsed from addr, or the contents of
+// defaultAddr if addr is a blank string.
+func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) {
+ addr = strings.TrimPrefix(addr, proto+"://")
+ if strings.Contains(addr, "://") {
+ return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr)
+ }
+ if addr == "" {
+ addr = defaultAddr
+ }
+ return fmt.Sprintf("%s://%s", proto, addr), nil
+}
+
+// ParseTCPAddr parses and validates that the specified address is a valid TCP
+// address. It returns a formatted TCP address, either using the address parsed
+// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
+// tryAddr is expected to have already been Trim()'d
+// defaultAddr must be in the full `tcp://host:port` form
+func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
+ if tryAddr == "" || tryAddr == "tcp://" {
+ return defaultAddr, nil
+ }
+ addr := strings.TrimPrefix(tryAddr, "tcp://")
+ if strings.Contains(addr, "://") || addr == "" {
+ return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr)
+ }
+
+ defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://")
+ defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr)
+ if err != nil {
+ return "", err
+ }
+ // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but
+ // not 1.4. See https://github.com/golang/go/issues/12200 and
+ // https://github.com/golang/go/issues/6530.
+ if strings.HasSuffix(addr, "]:") {
+ addr += defaultPort
+ }
+
+ u, err := url.Parse("tcp://" + addr)
+ if err != nil {
+ return "", err
+ }
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ // try port addition once
+ host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort))
+ }
+ if err != nil {
+ return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
+ }
+
+ if host == "" {
+ host = defaultHost
+ }
+ if port == "" {
+ port = defaultPort
+ }
+ p, err := strconv.Atoi(port)
+ if err != nil && p == 0 {
+ return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
+ }
+
+ return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil
+}
+
+// ValidateExtraHost validates that the specified string is a valid extrahost and returns it.
+// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6).
+func ValidateExtraHost(val string) (string, error) {
+ // allow for IPv6 addresses in extra hosts by only splitting on first ":"
+ arr := strings.SplitN(val, ":", 2)
+ if len(arr) != 2 || len(arr[0]) == 0 {
+ return "", fmt.Errorf("bad format for add-host: %q", val)
+ }
+ // Skip IPaddr validation for special "host-gateway" string
+ if arr[1] != HostGatewayName {
+ if _, err := ValidateIPAddress(arr[1]); err != nil {
+ return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1])
+ }
+ }
+ return val, nil
+}
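
A sketch of how the host parsing helpers normalize -H values on a Unix build (assumes the vendored import path; the defaults differ on Windows, as the next file shows):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	h, _ := opts.ParseHost(false, false, "tcp://:2375")
	fmt.Println(h) // "tcp://localhost:2375" — port kept, host defaulted

	h, _ = opts.ParseHost(false, false, "")
	fmt.Println(h) // empty input falls back to the platform default socket

	h, _ = opts.ParseTCPAddr("192.168.1.10", opts.DefaultTCPHost)
	fmt.Println(h) // "tcp://192.168.1.10:2375"
}
```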
diff --git a/vendor/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/docker/docker/opts/hosts_unix.go
new file mode 100644
index 000000000..29864194a
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/hosts_unix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package opts // import "github.com/docker/docker/opts"
+
+const (
+ // DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080
+ DefaultHTTPHost = "localhost"
+
+ // DefaultHost constant defines the default host string used by docker on other hosts than Windows
+ DefaultHost = "unix://" + DefaultUnixSocket
+)
diff --git a/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go
new file mode 100644
index 000000000..576236ba4
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/hosts_windows.go
@@ -0,0 +1,60 @@
+package opts // import "github.com/docker/docker/opts"
+
+const (
+ // TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5.
+ //
+ // On Windows, this mitigates a problem with the default options of running
+ // a docker client against a local docker daemon on TP5.
+ //
+ // What was found is that if the default host is "localhost", even if the client
+ // (and daemon as this is local) is not physically on a network, and the DNS
+ // cache is flushed (ipconfig /flushdns), then the client will pause for
+ // exactly one second when connecting to the daemon for calls. For example
+ // using docker run windowsservercore cmd, the CLI will send a create followed
+ // by an attach. You see the delay between the attach finishing and the attach
+ // being seen by the daemon.
+ //
+ // Here's some daemon debug logs with additional debug spew put in. The
+ // AfterWriteJSON log is the very last thing the daemon does as part of the
+ // create call. The POST /attach is the second CLI call. Notice the second
+ // time gap.
+ //
+ // time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs"
+ // time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig"
+ // time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...."
+ // time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking....
+ // time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...."
+ // time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...."
+ // time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func"
+ // time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create"
+ // time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2"
+ // time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate"
+ // time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON"
+ // ... 1 second gap here....
+ // time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach"
+ // time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1"
+ //
+ // We suspect this is either a bug introduced in GOLang 1.5.1, or that a change
+ // in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory,
+ // the Windows networking stack is supposed to resolve "localhost" internally,
+ // without hitting DNS, or even reading the hosts file (which is why localhost
+ // is commented out in the hosts file on Windows).
+ //
+ // We have validated that working around this using the actual IPv4 localhost
+ // address does not cause the delay.
+ //
+ // This does not occur with the docker client built with 1.4.3 on the same
+ // Windows build, regardless of whether the daemon is built using 1.5.1
+ // or 1.4.3. It does not occur on Linux. We also verified we see the same thing
+ // on a cross-compiled Windows binary (from Linux).
+ //
+ // Final note: This is a mitigation, not a 'real' fix. It is still susceptible
+ // to the delay if a user were to do 'docker run -H=tcp://localhost:2375...'
+ // explicitly.
+
+ // DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080
+ DefaultHTTPHost = "127.0.0.1"
+
+ // DefaultHost constant defines the default host string used by docker on Windows
+ DefaultHost = "npipe://" + DefaultNamedPipe
+)
diff --git a/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go
new file mode 100644
index 000000000..cfbff3a9f
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/ip.go
@@ -0,0 +1,47 @@
+package opts // import "github.com/docker/docker/opts"
+
+import (
+ "fmt"
+ "net"
+)
+
+// IPOpt holds an IP. It is used to store values from CLI flags.
+type IPOpt struct {
+ *net.IP
+}
+
+// NewIPOpt creates a new IPOpt from a reference net.IP and a
+// string representation of an IP. If the string is not a valid
+// IP, it will fall back to the specified reference.
+func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
+ o := &IPOpt{
+ IP: ref,
+ }
+ o.Set(defaultVal)
+ return o
+}
+
+// Set sets an IPv4 or IPv6 address from a given string. If the given
+// string is not parsable as an IP address it returns an error.
+func (o *IPOpt) Set(val string) error {
+ ip := net.ParseIP(val)
+ if ip == nil {
+ return fmt.Errorf("%s is not an ip address", val)
+ }
+ *o.IP = ip
+ return nil
+}
+
+// String returns the IP address stored in the IPOpt. If stored IP is a
+// nil pointer, it returns an empty string.
+func (o *IPOpt) String() string {
+ if *o.IP == nil {
+ return ""
+ }
+ return o.IP.String()
+}
+
+// Type returns the type of the option
+func (o *IPOpt) Type() string {
+ return "ip"
+}
diff --git a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go
new file mode 100644
index 000000000..60a093f28
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/opts.go
@@ -0,0 +1,348 @@
+package opts // import "github.com/docker/docker/opts"
+
+import (
+ "fmt"
+ "net"
+ "path"
+ "regexp"
+ "strings"
+
+ units "github.com/docker/go-units"
+)
+
+var (
+ alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
+ domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
+)
+
+// ListOpts holds a list of values and a validation function.
+type ListOpts struct {
+ values *[]string
+ validator ValidatorFctType
+}
+
+// NewListOpts creates a new ListOpts with the specified validator.
+func NewListOpts(validator ValidatorFctType) ListOpts {
+ var values []string
+ return *NewListOptsRef(&values, validator)
+}
+
+// NewListOptsRef creates a new ListOpts with the specified values and validator.
+func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
+ return &ListOpts{
+ values: values,
+ validator: validator,
+ }
+}
+
+func (opts *ListOpts) String() string {
+ if len(*opts.values) == 0 {
+ return ""
+ }
+ return fmt.Sprintf("%v", *opts.values)
+}
+
+// Set validates the input value if needed and adds it to the
+// internal slice.
+func (opts *ListOpts) Set(value string) error {
+ if opts.validator != nil {
+ v, err := opts.validator(value)
+ if err != nil {
+ return err
+ }
+ value = v
+ }
+ *opts.values = append(*opts.values, value)
+ return nil
+}
+
+// Delete removes the specified element from the slice.
+func (opts *ListOpts) Delete(key string) {
+ for i, k := range *opts.values {
+ if k == key {
+ *opts.values = append((*opts.values)[:i], (*opts.values)[i+1:]...)
+ return
+ }
+ }
+}
+
+// GetMap returns the content of values in a map in order to avoid
+// duplicates.
+func (opts *ListOpts) GetMap() map[string]struct{} {
+ ret := make(map[string]struct{})
+ for _, k := range *opts.values {
+ ret[k] = struct{}{}
+ }
+ return ret
+}
+
+// GetAll returns the values of slice.
+func (opts *ListOpts) GetAll() []string {
+ return *opts.values
+}
+
+// GetAllOrEmpty returns the values of the slice
+// or an empty slice when there are no values.
+func (opts *ListOpts) GetAllOrEmpty() []string {
+ v := *opts.values
+ if v == nil {
+ return make([]string, 0)
+ }
+ return v
+}
+
+// Get checks the existence of the specified key.
+func (opts *ListOpts) Get(key string) bool {
+ for _, k := range *opts.values {
+ if k == key {
+ return true
+ }
+ }
+ return false
+}
+
+// Len returns the number of elements in the slice.
+func (opts *ListOpts) Len() int {
+ return len(*opts.values)
+}
+
+// Type returns a string name for this Option type
+func (opts *ListOpts) Type() string {
+ return "list"
+}
+
+// WithValidator returns the ListOpts with validator set.
+func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts {
+ opts.validator = validator
+ return opts
+}
+
+// NamedOption is an interface that list and map options
+// with names implement.
+type NamedOption interface {
+ Name() string
+}
+
+// NamedListOpts is a ListOpts with a configuration name.
+// This struct is useful to keep reference to the assigned
+// field name in the internal configuration struct.
+type NamedListOpts struct {
+ name string
+ ListOpts
+}
+
+var _ NamedOption = &NamedListOpts{}
+
+// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
+func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
+ return &NamedListOpts{
+ name: name,
+ ListOpts: *NewListOptsRef(values, validator),
+ }
+}
+
+// Name returns the name of the NamedListOpts in the configuration.
+func (o *NamedListOpts) Name() string {
+ return o.name
+}
+
+// MapOpts holds a map of values and a validation function.
+type MapOpts struct {
+ values map[string]string
+ validator ValidatorFctType
+}
+
+// Set validates the input value if needed and adds it to the
+// internal map, by splitting on '='.
+func (opts *MapOpts) Set(value string) error {
+ if opts.validator != nil {
+ v, err := opts.validator(value)
+ if err != nil {
+ return err
+ }
+ value = v
+ }
+ vals := strings.SplitN(value, "=", 2)
+ if len(vals) == 1 {
+ (opts.values)[vals[0]] = ""
+ } else {
+ (opts.values)[vals[0]] = vals[1]
+ }
+ return nil
+}
+
+// GetAll returns the values of MapOpts as a map.
+func (opts *MapOpts) GetAll() map[string]string {
+ return opts.values
+}
+
+func (opts *MapOpts) String() string {
+ return fmt.Sprintf("%v", opts.values)
+}
+
+// Type returns a string name for this Option type
+func (opts *MapOpts) Type() string {
+ return "map"
+}
+
+// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
+func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
+ if values == nil {
+ values = make(map[string]string)
+ }
+ return &MapOpts{
+ values: values,
+ validator: validator,
+ }
+}
+
+// NamedMapOpts is a MapOpts struct with a configuration name.
+// This struct is useful to keep reference to the assigned
+// field name in the internal configuration struct.
+type NamedMapOpts struct {
+ name string
+ MapOpts
+}
+
+var _ NamedOption = &NamedMapOpts{}
+
+// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
+func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
+ return &NamedMapOpts{
+ name: name,
+ MapOpts: *NewMapOpts(values, validator),
+ }
+}
+
+// Name returns the name of the NamedMapOpts in the configuration.
+func (o *NamedMapOpts) Name() string {
+ return o.name
+}
+
+// ValidatorFctType defines a validator function that returns a validated string and/or an error.
+type ValidatorFctType func(val string) (string, error)
+
+// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error
+type ValidatorFctListType func(val string) ([]string, error)
+
+// ValidateIPAddress validates an IP address.
+func ValidateIPAddress(val string) (string, error) {
+ var ip = net.ParseIP(strings.TrimSpace(val))
+ if ip != nil {
+ return ip.String(), nil
+ }
+ return "", fmt.Errorf("%s is not an ip address", val)
+}
+
+// ValidateDNSSearch validates domain for resolvconf search configuration.
+// A zero length domain is represented by a dot (.).
+func ValidateDNSSearch(val string) (string, error) {
+ if val = strings.Trim(val, " "); val == "." {
+ return val, nil
+ }
+ return validateDomain(val)
+}
+
+func validateDomain(val string) (string, error) {
+ if alphaRegexp.FindString(val) == "" {
+ return "", fmt.Errorf("%s is not a valid domain", val)
+ }
+ ns := domainRegexp.FindSubmatch([]byte(val))
+ if len(ns) > 0 && len(ns[1]) < 255 {
+ return string(ns[1]), nil
+ }
+ return "", fmt.Errorf("%s is not a valid domain", val)
+}
+
+// ValidateLabel validates that the specified string is a valid label,
+// it does not use the reserved namespaces com.docker.*, io.docker.*, org.dockerproject.*
+// and returns it.
+// Labels are in the form of key=value.
+func ValidateLabel(val string) (string, error) {
+ if strings.Count(val, "=") < 1 {
+ return "", fmt.Errorf("bad attribute format: %s", val)
+ }
+
+ lowered := strings.ToLower(val)
+ if strings.HasPrefix(lowered, "com.docker.") || strings.HasPrefix(lowered, "io.docker.") ||
+ strings.HasPrefix(lowered, "org.dockerproject.") {
+ return "", fmt.Errorf(
+ "label %s is not allowed: the namespaces com.docker.*, io.docker.*, and org.dockerproject.* are reserved for internal use",
+ val)
+ }
+
+ return val, nil
+}
+
+// ValidateSingleGenericResource validates that a single entry in the
+// generic resource list is valid.
+// i.e. 'GPU=UID1' is valid, however 'GPU:UID1' or 'UID1' isn't
+func ValidateSingleGenericResource(val string) (string, error) {
+ if strings.Count(val, "=") < 1 {
+ return "", fmt.Errorf("invalid node-generic-resource format `%s` expected `name=value`", val)
+ }
+ return val, nil
+}
+
+// ParseLink parses and validates the specified string as a link format (name:alias)
+func ParseLink(val string) (string, string, error) {
+ if val == "" {
+ return "", "", fmt.Errorf("empty string specified for links")
+ }
+ arr := strings.Split(val, ":")
+ if len(arr) > 2 {
+ return "", "", fmt.Errorf("bad format for links: %s", val)
+ }
+ if len(arr) == 1 {
+ return val, val, nil
+ }
+ // This is kept because we can actually get a HostConfig with links
+ // from an already created container and the format is not `foo:bar`
+ // but `/foo:/c1/bar`
+ if strings.HasPrefix(arr[0], "/") {
+ _, alias := path.Split(arr[1])
+ return arr[0][1:], alias, nil
+ }
+ return arr[0], arr[1], nil
+}
+
+// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc)
+type MemBytes int64
+
+// String returns the string format of the human readable memory bytes
+func (m *MemBytes) String() string {
+ // NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 B" is not.
+ // We return "0" in case value is 0 here so that the default value is hidden.
+ // (Sometimes "default 0 B" is actually misleading)
+ if m.Value() != 0 {
+ return units.BytesSize(float64(m.Value()))
+ }
+ return "0"
+}
+
+// Set sets the value of the MemBytes by passing a string
+func (m *MemBytes) Set(value string) error {
+ val, err := units.RAMInBytes(value)
+ *m = MemBytes(val)
+ return err
+}
+
+// Type returns the type
+func (m *MemBytes) Type() string {
+ return "bytes"
+}
+
+// Value returns the value in int64
+func (m *MemBytes) Value() int64 {
+ return int64(*m)
+}
+
+// UnmarshalJSON is the customized unmarshaler for MemBytes
+func (m *MemBytes) UnmarshalJSON(s []byte) error {
+ if len(s) <= 2 || s[0] != '"' || s[len(s)-1] != '"' {
+ return fmt.Errorf("invalid size: %q", s)
+ }
+ val, err := units.RAMInBytes(string(s[1 : len(s)-1]))
+ *m = MemBytes(val)
+ return err
+}
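
A small sketch showing two of these option types in use as pflag-style values (assumes the vendored import path): MemBytes parses human-readable sizes via go-units, and ListOpts enforces its validator on Set.

```go
package main

import (
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	var mem opts.MemBytes
	_ = mem.Set("128m")
	fmt.Println(mem.Value(), mem.String()) // 134217728 128MiB

	labels := opts.NewListOpts(opts.ValidateLabel)
	if err := labels.Set("env=prod"); err == nil {
		fmt.Println(labels.GetAll()) // [env=prod]
	}
	if err := labels.Set("com.docker.foo=bar"); err != nil {
		fmt.Println("rejected:", err) // reserved namespace
	}
}
```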
diff --git a/vendor/github.com/docker/docker/opts/quotedstring.go b/vendor/github.com/docker/docker/opts/quotedstring.go
new file mode 100644
index 000000000..6c889070e
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/quotedstring.go
@@ -0,0 +1,37 @@
+package opts // import "github.com/docker/docker/opts"
+
+// QuotedString is a string that may have extra quotes around the value. The
+// quotes are stripped from the value.
+type QuotedString struct {
+ value *string
+}
+
+// Set sets a new value
+func (s *QuotedString) Set(val string) error {
+ *s.value = trimQuotes(val)
+ return nil
+}
+
+// Type returns the type of the value
+func (s *QuotedString) Type() string {
+ return "string"
+}
+
+func (s *QuotedString) String() string {
+ return *s.value
+}
+
+func trimQuotes(value string) string {
+ lastIndex := len(value) - 1
+ for _, char := range []byte{'\'', '"'} {
+ if value[0] == char && value[lastIndex] == char {
+ return value[1:lastIndex]
+ }
+ }
+ return value
+}
+
+// NewQuotedString returns a new quoted string option
+func NewQuotedString(value *string) *QuotedString {
+ return &QuotedString{value: value}
+}
diff --git a/vendor/github.com/docker/docker/opts/runtime.go b/vendor/github.com/docker/docker/opts/runtime.go
new file mode 100644
index 000000000..4b9babf0a
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/runtime.go
@@ -0,0 +1,79 @@
+package opts // import "github.com/docker/docker/opts"
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/docker/docker/api/types"
+)
+
+// RuntimeOpt defines a map of Runtimes
+type RuntimeOpt struct {
+ name string
+ stockRuntimeName string
+ values *map[string]types.Runtime
+}
+
+// NewNamedRuntimeOpt creates a new RuntimeOpt
+func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt {
+ if ref == nil {
+ ref = &map[string]types.Runtime{}
+ }
+ return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime}
+}
+
+// Name returns the name of the NamedListOpts in the configuration.
+func (o *RuntimeOpt) Name() string {
+ return o.name
+}
+
+// Set validates and updates the list of Runtimes
+func (o *RuntimeOpt) Set(val string) error {
+ parts := strings.SplitN(val, "=", 2)
+ if len(parts) != 2 {
+ return fmt.Errorf("invalid runtime argument: %s", val)
+ }
+
+ parts[0] = strings.TrimSpace(parts[0])
+ parts[1] = strings.TrimSpace(parts[1])
+ if parts[0] == "" || parts[1] == "" {
+ return fmt.Errorf("invalid runtime argument: %s", val)
+ }
+
+ parts[0] = strings.ToLower(parts[0])
+ if parts[0] == o.stockRuntimeName {
+ return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName)
+ }
+
+ if _, ok := (*o.values)[parts[0]]; ok {
+ return fmt.Errorf("runtime '%s' was already defined", parts[0])
+ }
+
+ (*o.values)[parts[0]] = types.Runtime{Path: parts[1]}
+
+ return nil
+}
+
+// String returns Runtime values as a string.
+func (o *RuntimeOpt) String() string {
+ var out []string
+ for k := range *o.values {
+ out = append(out, k)
+ }
+
+ return fmt.Sprintf("%v", out)
+}
+
+// GetMap returns a map of Runtimes (name: path)
+func (o *RuntimeOpt) GetMap() map[string]types.Runtime {
+ if o.values != nil {
+ return *o.values
+ }
+
+ return map[string]types.Runtime{}
+}
+
+// Type returns the type of the option
+func (o *RuntimeOpt) Type() string {
+ return "runtime"
+}
diff --git a/vendor/github.com/docker/docker/opts/ulimit.go b/vendor/github.com/docker/docker/opts/ulimit.go
new file mode 100644
index 000000000..61cc58d4d
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/ulimit.go
@@ -0,0 +1,81 @@
+package opts // import "github.com/docker/docker/opts"
+
+import (
+ "fmt"
+
+ units "github.com/docker/go-units"
+)
+
+// UlimitOpt defines a map of Ulimits
+type UlimitOpt struct {
+ values *map[string]*units.Ulimit
+}
+
+// NewUlimitOpt creates a new UlimitOpt
+func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt {
+ if ref == nil {
+ ref = &map[string]*units.Ulimit{}
+ }
+ return &UlimitOpt{ref}
+}
+
+// Set validates a Ulimit and sets its name as a key in UlimitOpt
+func (o *UlimitOpt) Set(val string) error {
+ l, err := units.ParseUlimit(val)
+ if err != nil {
+ return err
+ }
+
+ (*o.values)[l.Name] = l
+
+ return nil
+}
+
+// String returns Ulimit values as a string.
+func (o *UlimitOpt) String() string {
+ var out []string
+ for _, v := range *o.values {
+ out = append(out, v.String())
+ }
+
+ return fmt.Sprintf("%v", out)
+}
+
+// GetList returns a slice of pointers to Ulimits.
+func (o *UlimitOpt) GetList() []*units.Ulimit {
+ var ulimits []*units.Ulimit
+ for _, v := range *o.values {
+ ulimits = append(ulimits, v)
+ }
+
+ return ulimits
+}
+
+// Type returns the option type
+func (o *UlimitOpt) Type() string {
+ return "ulimit"
+}
+
+// NamedUlimitOpt defines a named map of Ulimits
+type NamedUlimitOpt struct {
+ name string
+ UlimitOpt
+}
+
+var _ NamedOption = &NamedUlimitOpt{}
+
+// NewNamedUlimitOpt creates a new NamedUlimitOpt
+func NewNamedUlimitOpt(name string, ref *map[string]*units.Ulimit) *NamedUlimitOpt {
+ if ref == nil {
+ ref = &map[string]*units.Ulimit{}
+ }
+ return &NamedUlimitOpt{
+ name: name,
+ UlimitOpt: *NewUlimitOpt(ref),
+ }
+}
+
+// Name returns the option name
+func (o *NamedUlimitOpt) Name() string {
+ return o.name
+}
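
A brief usage sketch for UlimitOpt (assumes the vendored import paths): values in name=soft[:hard] form are parsed by go-units and keyed by name.

```go
package main

import (
	"fmt"

	"github.com/docker/docker/opts"
	units "github.com/docker/go-units"
)

func main() {
	ulimits := make(map[string]*units.Ulimit)
	opt := opts.NewUlimitOpt(&ulimits)
	_ = opt.Set("nofile=1024:2048")
	for _, u := range opt.GetList() {
		fmt.Println(u.Name, u.Soft, u.Hard) // nofile 1024 2048
	}
}
```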
diff --git a/vendor/github.com/docker/libnetwork/ipamutils/utils.go b/vendor/github.com/docker/libnetwork/ipamutils/utils.go
new file mode 100644
index 000000000..3fd37cd88
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/ipamutils/utils.go
@@ -0,0 +1,135 @@
+// Package ipamutils provides utility functions for ipam management
+package ipamutils
+
+import (
+ "fmt"
+ "net"
+ "sync"
+)
+
+var (
+ // PredefinedLocalScopeDefaultNetworks contains a list of 31 IPv4 private networks with host size 16 and 12
+ // (172.17-31.x.x/16, 192.168.x.x/20) which do not overlap with the networks in `PredefinedGlobalScopeDefaultNetworks`
+ PredefinedLocalScopeDefaultNetworks []*net.IPNet
+ // PredefinedGlobalScopeDefaultNetworks contains a list of 64K IPv4 private networks with host size 8
+ // (10.x.x.x/24) which do not overlap with the networks in `PredefinedLocalScopeDefaultNetworks`
+ PredefinedGlobalScopeDefaultNetworks []*net.IPNet
+ mutex sync.Mutex
+ localScopeDefaultNetworks = []*NetworkToSplit{{"172.17.0.0/16", 16}, {"172.18.0.0/16", 16}, {"172.19.0.0/16", 16},
+ {"172.20.0.0/14", 16}, {"172.24.0.0/14", 16}, {"172.28.0.0/14", 16},
+ {"192.168.0.0/16", 20}}
+ globalScopeDefaultNetworks = []*NetworkToSplit{{"10.0.0.0/8", 24}}
+)
+
+// NetworkToSplit represents a network that has to be split into chunks with mask length Size.
+// Each subnet in the set is derived from the Base pool. Base is to be passed
+// in CIDR format.
+// Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256
+// 10.10.[0-255].0/24 address pools
+type NetworkToSplit struct {
+ Base string `json:"base"`
+ Size int `json:"size"`
+}
+
+func init() {
+ var err error
+ if PredefinedGlobalScopeDefaultNetworks, err = splitNetworks(globalScopeDefaultNetworks); err != nil {
+ //we are going to panic in case of error as we should never get into this state
+ panic("InitAddressPools failed to initialize the global scope default address pool")
+ }
+
+ if PredefinedLocalScopeDefaultNetworks, err = splitNetworks(localScopeDefaultNetworks); err != nil {
+ //we are going to panic in case of error as we should never get into this state
+ panic("InitAddressPools failed to initialize the local scope default address pool")
+ }
+}
+
+// configDefaultNetworks configures the local as well as the global default pool based on input
+func configDefaultNetworks(defaultAddressPool []*NetworkToSplit, result *[]*net.IPNet) error {
+ mutex.Lock()
+ defer mutex.Unlock()
+ defaultNetworks, err := splitNetworks(defaultAddressPool)
+ if err != nil {
+ return err
+ }
+ *result = defaultNetworks
+ return nil
+}
+
+// GetGlobalScopeDefaultNetworks returns PredefinedGlobalScopeDefaultNetworks
+func GetGlobalScopeDefaultNetworks() []*net.IPNet {
+ mutex.Lock()
+ defer mutex.Unlock()
+ return PredefinedGlobalScopeDefaultNetworks
+}
+
+// GetLocalScopeDefaultNetworks returns PredefinedLocalScopeDefaultNetworks
+func GetLocalScopeDefaultNetworks() []*net.IPNet {
+ mutex.Lock()
+ defer mutex.Unlock()
+ return PredefinedLocalScopeDefaultNetworks
+}
+
+// ConfigGlobalScopeDefaultNetworks configures global default pool.
+// Ideally this will be called from SwarmKit as part of swarm init
+func ConfigGlobalScopeDefaultNetworks(defaultAddressPool []*NetworkToSplit) error {
+ if defaultAddressPool == nil {
+ defaultAddressPool = globalScopeDefaultNetworks
+ }
+ return configDefaultNetworks(defaultAddressPool, &PredefinedGlobalScopeDefaultNetworks)
+}
+
+// ConfigLocalScopeDefaultNetworks configures local default pool.
+// Ideally this will be called during libnetwork init
+func ConfigLocalScopeDefaultNetworks(defaultAddressPool []*NetworkToSplit) error {
+ if defaultAddressPool == nil {
+ return nil
+ }
+ return configDefaultNetworks(defaultAddressPool, &PredefinedLocalScopeDefaultNetworks)
+}
+
+// splitNetworks takes a slice of networks, splits them accordingly and returns them
+func splitNetworks(list []*NetworkToSplit) ([]*net.IPNet, error) {
+ localPools := make([]*net.IPNet, 0, len(list))
+
+ for _, p := range list {
+ _, b, err := net.ParseCIDR(p.Base)
+ if err != nil {
+ return nil, fmt.Errorf("invalid base pool %q: %v", p.Base, err)
+ }
+ ones, _ := b.Mask.Size()
+ if p.Size <= 0 || p.Size < ones {
+ return nil, fmt.Errorf("invalid pools size: %d", p.Size)
+ }
+ localPools = append(localPools, splitNetwork(p.Size, b)...)
+ }
+ return localPools, nil
+}
+
+func splitNetwork(size int, base *net.IPNet) []*net.IPNet {
+ one, bits := base.Mask.Size()
+ mask := net.CIDRMask(size, bits)
+ n := 1 << uint(size-one)
+ s := uint(bits - size)
+ list := make([]*net.IPNet, 0, n)
+
+ for i := 0; i < n; i++ {
+ ip := copyIP(base.IP)
+ addIntToIP(ip, uint(i<<s))
+ list = append(list, &net.IPNet{IP: ip, Mask: mask})
+ }
+ return list
+}
+
+func copyIP(from net.IP) net.IP {
+ ip := make([]byte, len(from))
+ copy(ip, from)
+ return ip
+}
+
+func addIntToIP(array net.IP, ordinal uint) {
+ for i := len(array) - 1; i >= 0; i-- {
+ array[i] |= (byte)(ordinal & 0xff)
+ ordinal >>= 8
+ }
+}
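
For reference, a standard-library-only sketch of the pool-splitting arithmetic above: a 10.10.0.0/16 base with Size 24 yields 2^(24-16) = 256 sub-pools, each offset by the remaining host bits.

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	_, base, _ := net.ParseCIDR("10.10.0.0/16")
	ones, bits := base.Mask.Size() // 16, 32
	size := 24
	n := 1 << uint(size-ones) // 2^(24-16) = 256 pools
	fmt.Println("pools:", n)

	// The i-th pool starts at base + i<<(bits-size); show the first two.
	for i := 0; i < 2; i++ {
		ip := make(net.IP, len(base.IP))
		copy(ip, base.IP)
		ordinal := uint(i << uint(bits-size))
		for j := len(ip) - 1; j >= 0; j-- {
			ip[j] |= byte(ordinal & 0xff)
			ordinal >>= 8
		}
		fmt.Println(&net.IPNet{IP: ip, Mask: net.CIDRMask(size, bits)}) // 10.10.0.0/24, 10.10.1.0/24
	}
}
```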
diff --git a/vendor/github.com/jinzhu/copier/copier.go b/vendor/github.com/jinzhu/copier/copier.go
index 6d21da869..6dc9600c8 100644
--- a/vendor/github.com/jinzhu/copier/copier.go
+++ b/vendor/github.com/jinzhu/copier/copier.go
@@ -24,6 +24,13 @@ const (
// Denotes that the value has been copied
hasCopied
+
+ // Some default converter types for a nicer syntax
+ String string = ""
+ Bool bool = false
+ Int int = 0
+ Float32 float32 = 0
+ Float64 float64 = 0
)
// Option sets copy options
@@ -32,6 +39,18 @@ type Option struct {
// struct having all its fields set to their zero values respectively (see IsZero() in reflect/value.go)
IgnoreEmpty bool
DeepCopy bool
+ Converters []TypeConverter
+}
+
+type TypeConverter struct {
+ SrcType interface{}
+ DstType interface{}
+ Fn func(src interface{}) (interface{}, error)
+}
+
+type converterPair struct {
+ SrcType reflect.Type
+ DstType reflect.Type
}
// Tag Flags
@@ -59,12 +78,27 @@ func CopyWithOption(toValue interface{}, fromValue interface{}, opt Option) (err
func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) {
var (
- isSlice bool
- amount = 1
- from = indirect(reflect.ValueOf(fromValue))
- to = indirect(reflect.ValueOf(toValue))
+ isSlice bool
+ amount = 1
+ from = indirect(reflect.ValueOf(fromValue))
+ to = indirect(reflect.ValueOf(toValue))
+ converters map[converterPair]TypeConverter
)
+ // save converters into a map for faster lookup
+ for i := range opt.Converters {
+ if converters == nil {
+ converters = make(map[converterPair]TypeConverter)
+ }
+
+ pair := converterPair{
+ SrcType: reflect.TypeOf(opt.Converters[i].SrcType),
+ DstType: reflect.TypeOf(opt.Converters[i].DstType),
+ }
+
+ converters[pair] = opt.Converters[i]
+ }
+
if !to.CanAddr() {
return ErrInvalidCopyDestination
}
@@ -113,13 +147,16 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error)
for _, k := range from.MapKeys() {
toKey := indirect(reflect.New(toType.Key()))
- if !set(toKey, k, opt.DeepCopy) {
+ if !set(toKey, k, opt.DeepCopy, converters) {
return fmt.Errorf("%w map, old key: %v, new key: %v", ErrNotSupported, k.Type(), toType.Key())
}
- elemType, _ := indirectType(toType.Elem())
+ elemType := toType.Elem()
+ if elemType.Kind() != reflect.Slice {
+ elemType, _ = indirectType(elemType)
+ }
toValue := indirect(reflect.New(elemType))
- if !set(toValue, from.MapIndex(k), opt.DeepCopy) {
+ if !set(toValue, from.MapIndex(k), opt.DeepCopy, converters) {
if err = copier(toValue.Addr().Interface(), from.MapIndex(k).Interface(), opt); err != nil {
return err
}
@@ -148,7 +185,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error)
to.Set(reflect.Append(to, reflect.New(to.Type().Elem()).Elem()))
}
- if !set(to.Index(i), from.Index(i), opt.DeepCopy) {
+ if !set(to.Index(i), from.Index(i), opt.DeepCopy, converters) {
// ignore error while copy slice element
err = copier(to.Index(i).Addr().Interface(), from.Index(i).Interface(), opt)
if err != nil {
@@ -203,6 +240,8 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error)
// check source
if source.IsValid() {
+ copyUnexportedStructFields(dest, source)
+
// Copy from source field to dest field or method
fromTypeFields := deepFields(fromType)
for _, field := range fromTypeFields {
@@ -249,7 +288,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error)
toField := dest.FieldByName(destFieldName)
if toField.IsValid() {
if toField.CanSet() {
- if !set(toField, fromField, opt.DeepCopy) {
+ if !set(toField, fromField, opt.DeepCopy, converters) {
if err := copier(toField.Addr().Interface(), fromField.Interface(), opt); err != nil {
return err
}
@@ -291,7 +330,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error)
if toField := dest.FieldByName(destFieldName); toField.IsValid() && toField.CanSet() {
values := fromMethod.Call([]reflect.Value{})
if len(values) >= 1 {
- set(toField, values[0], opt.DeepCopy)
+ set(toField, values[0], opt.DeepCopy, converters)
}
}
}
@@ -303,7 +342,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error)
if to.Len() < i+1 {
to.Set(reflect.Append(to, dest.Addr()))
} else {
- if !set(to.Index(i), dest.Addr(), opt.DeepCopy) {
+ if !set(to.Index(i), dest.Addr(), opt.DeepCopy, converters) {
// ignore error while copy slice element
err = copier(to.Index(i).Addr().Interface(), dest.Addr().Interface(), opt)
if err != nil {
@@ -315,7 +354,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error)
if to.Len() < i+1 {
to.Set(reflect.Append(to, dest))
} else {
- if !set(to.Index(i), dest, opt.DeepCopy) {
+ if !set(to.Index(i), dest, opt.DeepCopy, converters) {
// ignore error while copy slice element
err = copier(to.Index(i).Addr().Interface(), dest.Interface(), opt)
if err != nil {
@@ -334,6 +373,24 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error)
return
}
+func copyUnexportedStructFields(to, from reflect.Value) {
+ if from.Kind() != reflect.Struct || to.Kind() != reflect.Struct || !from.Type().AssignableTo(to.Type()) {
+ return
+ }
+
+ // create a shallow copy of 'to' to get all fields
+ tmp := indirect(reflect.New(to.Type()))
+ tmp.Set(from)
+
+ // revert exported fields
+ for i := 0; i < to.NumField(); i++ {
+ if tmp.Field(i).CanSet() {
+ tmp.Field(i).Set(to.Field(i))
+ }
+ }
+ to.Set(tmp)
+}
+
func shouldIgnore(v reflect.Value, ignoreEmpty bool) bool {
if !ignoreEmpty {
return false
@@ -352,10 +409,10 @@ func deepFields(reflectType reflect.Type) []reflect.StructField {
// field name. It is empty for upper case (exported) field names.
// See https://golang.org/ref/spec#Uniqueness_of_identifiers
if v.PkgPath == "" {
+ fields = append(fields, v)
if v.Anonymous {
+ // also consider fields of anonymous fields as fields of the root
fields = append(fields, deepFields(v.Type)...)
- } else {
- fields = append(fields, v)
}
}
}
@@ -381,8 +438,14 @@ func indirectType(reflectType reflect.Type) (_ reflect.Type, isPtr bool) {
return reflectType, isPtr
}
-func set(to, from reflect.Value, deepCopy bool) bool {
+func set(to, from reflect.Value, deepCopy bool, converters map[converterPair]TypeConverter) bool {
if from.IsValid() {
+ if ok, err := lookupAndCopyWithConverter(to, from, converters); err != nil {
+ return false
+ } else if ok {
+ return true
+ }
+
if to.Kind() == reflect.Ptr {
// set `to` to nil if from is nil
if from.Kind() == reflect.Ptr && from.IsNil() {
@@ -416,6 +479,9 @@ func set(to, from reflect.Value, deepCopy bool) bool {
toKind = reflect.TypeOf(to.Interface()).Kind()
}
}
+ if from.Kind() == reflect.Ptr && from.IsNil() {
+ return true
+ }
if toKind == reflect.Struct || toKind == reflect.Map || toKind == reflect.Slice {
return false
}
@@ -457,7 +523,7 @@ func set(to, from reflect.Value, deepCopy bool) bool {
to.Set(rv)
}
} else if from.Kind() == reflect.Ptr {
- return set(to, from.Elem(), deepCopy)
+ return set(to, from.Elem(), deepCopy, converters)
} else {
return false
}
@@ -466,6 +532,33 @@ func set(to, from reflect.Value, deepCopy bool) bool {
return true
}
+// lookupAndCopyWithConverter looks up the type pair; on success, the TypeConverter Fn is called to copy src to the dst field.
+func lookupAndCopyWithConverter(to, from reflect.Value, converters map[converterPair]TypeConverter) (copied bool, err error) {
+ pair := converterPair{
+ SrcType: from.Type(),
+ DstType: to.Type(),
+ }
+
+ if cnv, ok := converters[pair]; ok {
+ result, err := cnv.Fn(from.Interface())
+
+ if err != nil {
+ return false, err
+ }
+
+ if result != nil {
+ to.Set(reflect.ValueOf(result))
+ } else {
+ // in case we've got a nil value to copy
+ to.Set(reflect.Zero(to.Type()))
+ }
+
+ return true, nil
+ }
+
+ return false, nil
+}
+
// parseTags Parses struct tags and returns uint8 bit flags.
func parseTags(tag string) (flg uint8, name string, err error) {
for _, t := range strings.Split(tag, ",") {
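
A hedged usage sketch for the new Converters option added above: register a string-to-time.Time converter and let CopyWithOption apply it when field types match the registered pair. Illustrative only, with local src/dst types.

```go
package main

import (
	"fmt"
	"time"

	"github.com/jinzhu/copier"
)

type src struct{ CreatedAt string }
type dst struct{ CreatedAt time.Time }

func main() {
	conv := copier.TypeConverter{
		SrcType: copier.String, // the String sentinel added in this change
		DstType: time.Time{},
		Fn: func(v interface{}) (interface{}, error) {
			return time.Parse(time.RFC3339, v.(string))
		},
	}
	var d dst
	err := copier.CopyWithOption(&d, src{CreatedAt: "2022-01-11T00:00:00Z"},
		copier.Option{Converters: []copier.TypeConverter{conv}})
	fmt.Println(d.CreatedAt, err)
}
```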
diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml
index c9014ce1d..0af08e65e 100644
--- a/vendor/github.com/klauspost/compress/.goreleaser.yml
+++ b/vendor/github.com/klauspost/compress/.goreleaser.yml
@@ -3,6 +3,7 @@
before:
hooks:
- ./gen.sh
+ - go install mvdan.cc/garble@latest
builds:
-
@@ -31,6 +32,7 @@ builds:
- mips64le
goarm:
- 7
+ gobinary: garble
-
id: "s2d"
binary: s2d
@@ -57,6 +59,7 @@ builds:
- mips64le
goarm:
- 7
+ gobinary: garble
-
id: "s2sx"
binary: s2sx
@@ -84,6 +87,7 @@ builds:
- mips64le
goarm:
- 7
+ gobinary: garble
archives:
-
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 3429879eb..e8ff994f8 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -17,6 +17,13 @@ This package provides various compression algorithms.
# changelog
+* Jan 11, 2022 (v1.14.1)
+ * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
+ * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)
+ * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
+ * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
+ * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
+
* Aug 30, 2021 (v1.13.5)
* gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
* s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
@@ -432,6 +439,13 @@ For more information see my blog post on [Fast Linear Time Compression](http://b
This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
+# Other packages
+
+Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code):
+
+* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
+* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
+* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
# license
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
index 5283ac5a5..bffa2f332 100644
--- a/vendor/github.com/klauspost/compress/flate/deflate.go
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -6,6 +6,7 @@
package flate
import (
+ "encoding/binary"
"fmt"
"io"
"math"
@@ -37,15 +38,17 @@ const (
maxMatchLength = 258 // The longest match for the compressor
minOffsetSize = 1 // The shortest offset that makes any sense
- // The maximum number of tokens we put into a single flat block, just too
- // stop things from getting too large.
- maxFlateBlockTokens = 1 << 14
+ // The maximum number of tokens we will encode at a time.
+ // Smaller sizes usually create less optimal blocks.
+ // Bigger can make context switching slow.
+ // We use this for levels 7-9, so we make it big.
+ maxFlateBlockTokens = 1 << 15
maxStoreBlockSize = 65535
hashBits = 17 // After 17 performance degrades
hashSize = 1 << hashBits
hashMask = (1 << hashBits) - 1
hashShift = (hashBits + minMatchLength - 1) / minMatchLength
- maxHashOffset = 1 << 24
+ maxHashOffset = 1 << 28
skipNever = math.MaxInt32
@@ -70,9 +73,9 @@ var levels = []compressionLevel{
{0, 0, 0, 0, 0, 6},
// Levels 7-9 use increasingly more lazy matching
// and increasingly stringent conditions for "good enough".
- {8, 8, 24, 16, skipNever, 7},
- {10, 16, 24, 64, skipNever, 8},
- {32, 258, 258, 4096, skipNever, 9},
+ {8, 12, 16, 24, skipNever, 7},
+ {16, 30, 40, 64, skipNever, 8},
+ {32, 258, 258, 1024, skipNever, 9},
}
// advancedState contains state for the advanced levels, with bigger hash tables, etc.
@@ -93,8 +96,9 @@ type advancedState struct {
hashOffset int
// input window: unprocessed data is window[index:windowEnd]
- index int
- hashMatch [maxMatchLength + minMatchLength]uint32
+ index int
+ estBitsPerByte int
+ hashMatch [maxMatchLength + minMatchLength]uint32
hash uint32
ii uint16 // position of last match, intended to overflow to reset.
@@ -103,6 +107,7 @@ type advancedState struct {
type compressor struct {
compressionLevel
+ h *huffmanEncoder
w *huffmanBitWriter
// compression algorithm
@@ -170,7 +175,8 @@ func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
window = d.window[d.blockStart:index]
}
d.blockStart = index
- d.w.writeBlock(tok, eof, window)
+ //d.w.writeBlock(tok, eof, window)
+ d.w.writeBlockDynamic(tok, eof, window, d.sync)
return d.w.err
}
return nil
@@ -263,7 +269,7 @@ func (d *compressor) fillWindow(b []byte) {
// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
-func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
+func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) {
minMatchLook := maxMatchLength
if lookahead < minMatchLook {
minMatchLook = lookahead
@@ -279,36 +285,75 @@ func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead
// If we've got a match that's good enough, only look in 1/4 the chain.
tries := d.chain
- length = prevLength
- if length >= d.good {
- tries >>= 2
- }
+ length = minMatchLength - 1
wEnd := win[pos+length]
wPos := win[pos:]
minIndex := pos - windowSize
+ if minIndex < 0 {
+ minIndex = 0
+ }
+ offset = 0
+
+ cGain := 0
+ if d.chain < 100 {
+ for i := prevHead; tries > 0; tries-- {
+ if wEnd == win[i+length] {
+ n := matchLen(win[i:i+minMatchLook], wPos)
+ if n > length {
+ length = n
+ offset = pos - i
+ ok = true
+ if n >= nice {
+ // The match is good enough that we don't try to find a better one.
+ break
+ }
+ wEnd = win[pos+n]
+ }
+ }
+ if i <= minIndex {
+ // hashPrev[i & windowMask] has already been overwritten, so stop now.
+ break
+ }
+ i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
+ if i < minIndex {
+ break
+ }
+ }
+ return
+ }
+ // Some like it higher (CSV), some like it lower (JSON)
+ const baseCost = 6
+ // Base is 4 bytes with an additional cost.
+ // Matches must be better than this.
for i := prevHead; tries > 0; tries-- {
if wEnd == win[i+length] {
n := matchLen(win[i:i+minMatchLook], wPos)
-
- if n > length && (n > minMatchLength || pos-i <= 4096) {
- length = n
- offset = pos - i
- ok = true
- if n >= nice {
- // The match is good enough that we don't try to find a better one.
- break
+ if n > length {
+ // Calculate gain. Estimate
+ newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]])
+
+ //fmt.Println(n, "gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]))
+ if newGain > cGain {
+ length = n
+ offset = pos - i
+ cGain = newGain
+ ok = true
+ if n >= nice {
+ // The match is good enough that we don't try to find a better one.
+ break
+ }
+ wEnd = win[pos+n]
}
- wEnd = win[pos+n]
}
}
- if i == minIndex {
+ if i <= minIndex {
// hashPrev[i & windowMask] has already been overwritten, so stop now.
break
}
i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
- if i < minIndex || i < 0 {
+ if i < minIndex {
break
}
}
@@ -327,8 +372,7 @@ func (d *compressor) writeStoredBlock(buf []byte) error {
// of the supplied slice.
// The caller must ensure that len(b) >= 4.
func hash4(b []byte) uint32 {
- b = b[:4]
- return hash4u(uint32(b[3])|uint32(b[2])<<8|uint32(b[1])<<16|uint32(b[0])<<24, hashBits)
+ return hash4u(binary.LittleEndian.Uint32(b), hashBits)
}
// bulkHash4 will compute hashes using the same
@@ -337,11 +381,12 @@ func bulkHash4(b []byte, dst []uint32) {
if len(b) < 4 {
return
}
- hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+ hb := binary.LittleEndian.Uint32(b)
+
dst[0] = hash4u(hb, hashBits)
end := len(b) - 4 + 1
for i := 1; i < end; i++ {
- hb = (hb << 8) | uint32(b[i+3])
+ hb = (hb >> 8) | uint32(b[i+3])<<24
dst[i] = hash4u(hb, hashBits)
}
}
@@ -374,10 +419,21 @@ func (d *compressor) deflateLazy() {
if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
return
}
+ if d.windowEnd != s.index && d.chain > 100 {
+ // Get literal huffman coder.
+ if d.h == nil {
+ d.h = newHuffmanEncoder(maxFlateBlockTokens)
+ }
+ var tmp [256]uint16
+ for _, v := range d.window[s.index:d.windowEnd] {
+ tmp[v]++
+ }
+ d.h.generate(tmp[:], 15)
+ }
s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
if s.index < s.maxInsertIndex {
- s.hash = hash4(d.window[s.index : s.index+minMatchLength])
+ s.hash = hash4(d.window[s.index:])
}
for {
@@ -410,7 +466,7 @@ func (d *compressor) deflateLazy() {
}
if s.index < s.maxInsertIndex {
// Update the hash
- s.hash = hash4(d.window[s.index : s.index+minMatchLength])
+ s.hash = hash4(d.window[s.index:])
ch := s.hashHead[s.hash&hashMask]
s.chainHead = int(ch)
s.hashPrev[s.index&windowMask] = ch
@@ -426,12 +482,37 @@ func (d *compressor) deflateLazy() {
}
if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
- if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
+ if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok {
s.length = newLength
s.offset = newOffset
}
}
+
if prevLength >= minMatchLength && s.length <= prevLength {
+ // Check for better match at end...
+ //
+ // checkOff must be >=2 since we otherwise risk checking s.index
+ // Offset of 2 seems to yield best results.
+ const checkOff = 2
+ prevIndex := s.index - 1
+ if prevIndex+prevLength+checkOff < s.maxInsertIndex {
+ end := lookahead
+ if lookahead > maxMatchLength {
+ end = maxMatchLength
+ }
+ end += prevIndex
+ idx := prevIndex + prevLength - (4 - checkOff)
+ h := hash4(d.window[idx:])
+ ch2 := int(s.hashHead[h&hashMask]) - s.hashOffset - prevLength + (4 - checkOff)
+ if ch2 > minIndex {
+ length := matchLen(d.window[prevIndex:end], d.window[ch2:])
+ // It seems like a pure length metric is best.
+ if length > prevLength {
+ prevLength = length
+ prevOffset = prevIndex - ch2
+ }
+ }
+ }
// There was a match at the previous step, and the current match is
// not better. Output the previous match.
d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
@@ -479,6 +560,7 @@ func (d *compressor) deflateLazy() {
}
d.tokens.Reset()
}
+ s.ii = 0
} else {
// Reset, if we got a match this run.
if s.length >= minMatchLength {
@@ -498,13 +580,12 @@ func (d *compressor) deflateLazy() {
// If we have a long run of no matches, skip additional bytes
// Resets when s.ii overflows after 64KB.
- if s.ii > 31 {
- n := int(s.ii >> 5)
+ if n := int(s.ii) - d.chain; n > 0 {
+ n = 1 + int(n>>6)
for j := 0; j < n; j++ {
if s.index >= d.windowEnd-1 {
break
}
-
d.tokens.AddLiteral(d.window[s.index-1])
if d.tokens.n == maxFlateBlockTokens {
if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
@@ -512,6 +593,14 @@ func (d *compressor) deflateLazy() {
}
d.tokens.Reset()
}
+ // Index the current position so the hash chains stay updated while skipping.
+ if s.index < s.maxInsertIndex {
+ h := hash4(d.window[s.index:])
+ ch := s.hashHead[h]
+ s.chainHead = int(ch)
+ s.hashPrev[s.index&windowMask] = ch
+ s.hashHead[h] = uint32(s.index + s.hashOffset)
+ }
s.index++
}
// Flush last byte
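The no-match branch now waits until s.ii (positions since the last match) exceeds d.chain before skipping, then emits roughly one extra literal per 64 bytes of drought while keeping the hash chains indexed. A worked sketch of just the step count, assuming ii and chain behave as in the hunk above:

package main

import "fmt"

// literalsToSkip reproduces the step count used during a long run without
// matches: nothing is skipped until ii exceeds chain, then roughly one extra
// byte per 64 bytes of drought.
func literalsToSkip(ii, chain int) int {
	n := ii - chain
	if n <= 0 {
		return 0 // still within the normal per-position search
	}
	return 1 + n>>6
}

func main() {
	for _, ii := range []int{100, 200, 500, 2000} {
		fmt.Printf("ii=%d chain=128 -> skip %d literals\n", ii, literalsToSkip(ii, 128))
	}
}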
@@ -611,7 +700,9 @@ func (d *compressor) write(b []byte) (n int, err error) {
}
n = len(b)
for len(b) > 0 {
- d.step(d)
+ if d.windowEnd == len(d.window) || d.sync {
+ d.step(d)
+ }
b = b[d.fill(d, b):]
if d.err != nil {
return 0, d.err
@@ -652,13 +743,13 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
level = 5
fallthrough
case level >= 1 && level <= 6:
- d.w.logNewTablePenalty = 8
+ d.w.logNewTablePenalty = 7
d.fast = newFastEnc(level)
d.window = make([]byte, maxStoreBlockSize)
d.fill = (*compressor).fillBlock
d.step = (*compressor).storeFast
case 7 <= level && level <= 9:
- d.w.logNewTablePenalty = 10
+ d.w.logNewTablePenalty = 8
d.state = &advancedState{}
d.compressionLevel = levels[level]
d.initDeflate()
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
index a746eb733..0b2e54972 100644
--- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -213,11 +213,9 @@ func (e *fastGen) Reset() {
// matchLen returns the maximum length.
// 'a' must be the shortest of the two.
func matchLen(a, b []byte) int {
- b = b[:len(a)]
var checked int
for len(a) >= 8 {
- b = b[:len(a)]
if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
return checked + (bits.TrailingZeros64(diff) >> 3)
}
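With the redundant reslicing removed, matchLen still compares eight bytes at a time: XOR the two words, and the index of the first differing byte is the trailing-zero count of the difference divided by eight. A self-contained version of the same idea:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen returns the number of equal leading bytes; a must not be longer than b.
func matchLen(a, b []byte) int {
	var n int
	for len(a) >= 8 {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		a, b = a[8:], b[8:]
		n += 8
	}
	for i := range a {
		if a[i] != b[i] {
			return n + i
		}
	}
	return n + len(a)
}

func main() {
	fmt.Println(matchLen([]byte("abcdefgh12XY"), []byte("abcdefgh12ZZ"))) // 10
}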
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
index 3ad5e9807..fd49efd75 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -52,18 +52,18 @@ var lengthBase = [32]uint8{
}
// offset code word extra bits.
-var offsetExtraBits = [64]int8{
+var offsetExtraBits = [32]int8{
0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
/* extended window */
- 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20,
+ 14, 14,
}
var offsetCombined = [32]uint32{}
func init() {
- var offsetBase = [64]uint32{
+ var offsetBase = [32]uint32{
/* normal deflate */
0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
@@ -73,9 +73,7 @@ func init() {
0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
/* extended window */
- 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000,
- 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000,
- 0x100000, 0x180000, 0x200000, 0x300000,
+ 0x008000, 0x00c000,
}
for i := range offsetCombined[:] {
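Shrinking offsetExtraBits and offsetBase to 32 entries drops the extended-window codes; the two tables stay consistent because each base equals the previous base plus 2^extraBits of the previous code. A quick consistency check over the values shown above (the hidden middle entries are assumed to follow the standard deflate distance table):

package main

import "fmt"

func main() {
	extra := []int8{
		0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
		4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
		9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
		14, 14,
	}
	base := make([]uint32, len(extra))
	for i := 1; i < len(base); i++ {
		base[i] = base[i-1] + 1<<uint(extra[i-1])
	}
	// Expect base[4]=4, base[29]=0x6000, base[30]=0x8000, matching the table.
	fmt.Printf("base[4]=%d base[29]=%#x base[30]=%#x\n", base[4], base[29], base[30])
}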
@@ -155,37 +153,33 @@ func (w *huffmanBitWriter) reset(writer io.Writer) {
w.lastHuffMan = false
}
-func (w *huffmanBitWriter) canReuse(t *tokens) (offsets, lits bool) {
- offsets, lits = true, true
+func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
a := t.offHist[:offsetCodeCount]
- b := w.offsetFreq[:len(a)]
- for i := range a {
- if b[i] == 0 && a[i] != 0 {
- offsets = false
- break
+ b := w.offsetEncoding.codes
+ b = b[:len(a)]
+ for i, v := range a {
+ if v != 0 && b[i].len == 0 {
+ return false
}
}
a = t.extraHist[:literalCount-256]
- b = w.literalFreq[256:literalCount]
+ b = w.literalEncoding.codes[256:literalCount]
b = b[:len(a)]
- for i := range a {
- if b[i] == 0 && a[i] != 0 {
- lits = false
- break
+ for i, v := range a {
+ if v != 0 && b[i].len == 0 {
+ return false
}
}
- if lits {
- a = t.litHist[:]
- b = w.literalFreq[:len(a)]
- for i := range a {
- if b[i] == 0 && a[i] != 0 {
- lits = false
- break
- }
+
+ a = t.litHist[:256]
+ b = w.literalEncoding.codes[:len(a)]
+ for i, v := range a {
+ if v != 0 && b[i].len == 0 {
+ return false
}
}
- return
+ return true
}
func (w *huffmanBitWriter) flush() {
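canReuse now collapses to a single question: does every offset, extra-length, and literal symbol with a nonzero count already have a code length in the previous tables? A reduced sketch of that check over one histogram/table pair, with a hypothetical hcode type:

package main

import "fmt"

type hcode struct{ code, len uint16 }

// canEncode reports whether every symbol that occurs (freq > 0) has an
// assigned code length in the previously generated table.
func canEncode(freq []uint16, codes []hcode) bool {
	for i, v := range freq {
		if v != 0 && codes[i].len == 0 {
			return false
		}
	}
	return true
}

func main() {
	codes := []hcode{{code: 0, len: 2}, {code: 1, len: 3}, {}}
	fmt.Println(canEncode([]uint16{5, 1, 0}, codes)) // true: symbol 2 unused
	fmt.Println(canEncode([]uint16{5, 1, 2}, codes)) // false: symbol 2 has no code
}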
@@ -222,7 +216,7 @@ func (w *huffmanBitWriter) write(b []byte) {
}
func (w *huffmanBitWriter) writeBits(b int32, nb uint16) {
- w.bits |= uint64(b) << w.nbits
+ w.bits |= uint64(b) << (w.nbits & 63)
w.nbits += nb
if w.nbits >= 48 {
w.writeOutBits()
@@ -423,7 +417,7 @@ func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
func (w *huffmanBitWriter) writeCode(c hcode) {
// The function does not get inlined if we "& 63" the shift.
- w.bits |= uint64(c.code) << w.nbits
+ w.bits |= uint64(c.code) << (w.nbits & 63)
w.nbits += c.len
if w.nbits >= 48 {
w.writeOutBits()
@@ -566,7 +560,7 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
w.lastHeader = 0
}
numLiterals, numOffsets := w.indexTokens(tokens, false)
- w.generate(tokens)
+ w.generate()
var extraBits int
storedSize, storable := w.storedSize(input)
if storable {
@@ -595,7 +589,7 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
}
// Stored bytes?
- if storable && storedSize < size {
+ if storable && storedSize <= size {
w.writeStoredHeader(len(input), eof)
w.writeBytes(input)
return
@@ -634,22 +628,39 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
w.lastHeader = 0
w.lastHuffMan = false
}
- if !sync {
- tokens.Fill()
+
+ // fillReuse enables filling of empty values.
+ // This will make encodings always reusable without testing.
+ // However, this does not appear to be beneficial in most cases.
+ const fillReuse = false
+
+ // Check if we can reuse...
+ if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) {
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
}
+
numLiterals, numOffsets := w.indexTokens(tokens, !sync)
+ extraBits := 0
+ ssize, storable := w.storedSize(input)
+
+ const usePrefs = true
+ if storable || w.lastHeader > 0 {
+ extraBits = w.extraBitSize()
+ }
var size int
+
// Check if we should reuse.
if w.lastHeader > 0 {
// Estimate size for using a new table.
// Use the previous header size as the best estimate.
newSize := w.lastHeader + tokens.EstimatedBits()
- newSize += newSize >> w.logNewTablePenalty
+ newSize += int(w.literalEncoding.codes[endBlockMarker].len) + newSize>>w.logNewTablePenalty
// The estimated size is calculated as an optimal table.
// We add a penalty to make it more realistic and re-use a bit more.
- reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + w.extraBitSize()
+ reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits
// Check if a new table is better.
if newSize < reuseSize {
@@ -660,35 +671,79 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
} else {
size = reuseSize
}
+
+ if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
+ // Check if we get a reasonable size decrease.
+ if storable && ssize <= size {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ w.writeFixedHeader(eof)
+ if !sync {
+ tokens.AddEOB()
+ }
+ w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+ return
+ }
// Check if we get a reasonable size decrease.
- if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
+ if storable && ssize <= size {
w.writeStoredHeader(len(input), eof)
w.writeBytes(input)
- w.lastHeader = 0
return
}
}
// We want a new block/table
if w.lastHeader == 0 {
- w.generate(tokens)
+ if fillReuse && !sync {
+ w.fillTokens()
+ numLiterals, numOffsets = maxNumLit, maxNumDist
+ } else {
+ w.literalFreq[endBlockMarker] = 1
+ }
+
+ w.generate()
// Generate codegen and codegenFrequencies, which indicates how to encode
// the literalEncoding and the offsetEncoding.
w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
w.codegenEncoding.generate(w.codegenFreq[:], 7)
+
var numCodegens int
- size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, w.extraBitSize())
- // Store bytes, if we don't get a reasonable improvement.
- if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
+ if fillReuse && !sync {
+ // Reindex for accurate size...
+ w.indexTokens(tokens, true)
+ }
+ size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
+
+ // Store predefined, if we don't get a reasonable improvement.
+ if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
+ // Store bytes, if we don't get an improvement.
+ if storable && ssize <= preSize {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ w.writeFixedHeader(eof)
+ if !sync {
+ tokens.AddEOB()
+ }
+ w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+ return
+ }
+
+ if storable && ssize <= size {
+ // Store bytes, if we don't get an improvement.
w.writeStoredHeader(len(input), eof)
w.writeBytes(input)
- w.lastHeader = 0
return
}
// Write Huffman table.
w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
- w.lastHeader, _ = w.headerSize()
+ if !sync {
+ w.lastHeader, _ = w.headerSize()
+ }
w.lastHuffMan = false
}
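writeBlockDynamic now weighs up to four encodings and emits the smallest estimate: reuse of the previous dynamic table, fixed (predefined) codes, a stored block, or a new dynamic table. The sketch below captures only the comparison; the real code also adds the end-of-block cost and the logNewTablePenalty bias, which are omitted here.

package main

import "fmt"

// pick chooses a block type from estimated sizes in bits. A value < 0 means
// that option is unavailable (no previous table, or block not storable).
func pick(reuse, fixed, dynamic, stored int) string {
	best, name := dynamic, "new dynamic table"
	if reuse >= 0 && reuse < best {
		best, name = reuse, "reuse previous table"
	}
	if fixed >= 0 && fixed < best {
		best, name = fixed, "fixed (predefined) codes"
	}
	if stored >= 0 && stored <= best {
		name = "stored"
	}
	return name
}

func main() {
	fmt.Println(pick(5200, 5600, 5000, 8192)) // new dynamic table
	fmt.Println(pick(4100, 5600, 5000, 8192)) // reuse previous table
	fmt.Println(pick(-1, 900, 1400, 820))     // stored
}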
@@ -699,6 +754,19 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes)
}
+func (w *huffmanBitWriter) fillTokens() {
+ for i, v := range w.literalFreq[:literalCount] {
+ if v == 0 {
+ w.literalFreq[i] = 1
+ }
+ }
+ for i, v := range w.offsetFreq[:offsetCodeCount] {
+ if v == 0 {
+ w.offsetFreq[i] = 1
+ }
+ }
+}
+
// indexTokens indexes a slice of tokens, and updates
// literalFreq and offsetFreq, and generates literalEncoding
// and offsetEncoding.
@@ -733,7 +801,7 @@ func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, num
return
}
-func (w *huffmanBitWriter) generate(t *tokens) {
+func (w *huffmanBitWriter) generate() {
w.literalEncoding.generate(w.literalFreq[:literalCount], 15)
w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
}
@@ -768,7 +836,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
if t < matchType {
//w.writeCode(lits[t.literal()])
c := lits[t.literal()]
- bits |= uint64(c.code) << nbits
+ bits |= uint64(c.code) << (nbits & 63)
nbits += c.len
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
@@ -796,7 +864,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
} else {
// inlined
c := lengths[lengthCode&31]
- bits |= uint64(c.code) << nbits
+ bits |= uint64(c.code) << (nbits & 63)
nbits += c.len
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
@@ -819,7 +887,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
if extraLengthBits > 0 {
//w.writeBits(extraLength, extraLengthBits)
extraLength := int32(length - lengthBase[lengthCode&31])
- bits |= uint64(extraLength) << nbits
+ bits |= uint64(extraLength) << (nbits & 63)
nbits += extraLengthBits
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
@@ -846,7 +914,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
} else {
// inlined
c := offs[offsetCode]
- bits |= uint64(c.code) << nbits
+ bits |= uint64(c.code) << (nbits & 63)
nbits += c.len
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
@@ -867,7 +935,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
offsetComb := offsetCombined[offsetCode]
if offsetComb > 1<<16 {
//w.writeBits(extraOffset, extraOffsetBits)
- bits |= uint64(offset&matchOffsetOnlyMask-(offsetComb&0xffff)) << nbits
+ bits |= uint64(offset-(offsetComb&0xffff)) << (nbits & 63)
nbits += uint16(offsetComb >> 16)
if nbits >= 48 {
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
@@ -996,10 +1064,41 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
encoding := w.literalEncoding.codes[:256]
// Go 1.16 LOVES having these on stack. At least 1.5x the speed.
bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
+
+ // Unroll, write 3 codes/loop.
+ // Fastest number of unrolls.
+ for len(input) > 3 {
+ // We must have at least 48 bits free.
+ if nbits >= 8 {
+ n := nbits >> 3
+ binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+ bits >>= (n * 8) & 63
+ nbits -= n * 8
+ nbytes += uint8(n)
+ }
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ a, b := encoding[input[0]], encoding[input[1]]
+ bits |= uint64(a.code) << (nbits & 63)
+ bits |= uint64(b.code) << ((nbits + a.len) & 63)
+ c := encoding[input[2]]
+ nbits += b.len + a.len
+ bits |= uint64(c.code) << (nbits & 63)
+ nbits += c.len
+ input = input[3:]
+ }
+
+ // Remaining...
for _, t := range input {
// Bitwriting inlined, ~30% speedup
c := encoding[t]
- bits |= uint64(c.code) << nbits
+ bits |= uint64(c.code) << (nbits & 63)
nbits += c.len
if debugDeflate {
count += int(c.len)
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
index 67b2b3872..f35e00261 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_code.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -129,9 +129,7 @@ func (h *huffmanEncoder) bitLength(freq []uint16) int {
func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
var total int
for _, f := range b {
- if f != 0 {
- total += int(h.codes[f].len)
- }
+ total += int(h.codes[f].len)
}
return total
}
diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go
index d1edb356c..d5f62f6a2 100644
--- a/vendor/github.com/klauspost/compress/flate/inflate.go
+++ b/vendor/github.com/klauspost/compress/flate/inflate.go
@@ -328,11 +328,17 @@ func (f *decompressor) nextBlock() {
switch typ {
case 0:
f.dataBlock()
+ if debugDecode {
+ fmt.Println("stored block")
+ }
case 1:
// compressed, fixed Huffman tables
f.hl = &fixedHuffmanDecoder
f.hd = nil
f.huffmanBlockDecoder()()
+ if debugDecode {
+ fmt.Println("predefinied huffman block")
+ }
case 2:
// compressed, dynamic Huffman tables
if f.err = f.readHuffman(); f.err != nil {
@@ -341,6 +347,9 @@ func (f *decompressor) nextBlock() {
f.hl = &f.h1
f.hd = &f.h2
f.huffmanBlockDecoder()()
+ if debugDecode {
+ fmt.Println("dynamic huffman block")
+ }
default:
// 3 is reserved.
if debugDecode {
diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go
index eb862d7a9..3a9618ee1 100644
--- a/vendor/github.com/klauspost/compress/flate/token.go
+++ b/vendor/github.com/klauspost/compress/flate/token.go
@@ -129,11 +129,11 @@ var offsetCodes14 = [256]uint32{
type token uint32
type tokens struct {
- nLits int
extraHist [32]uint16 // codes 256->maxnumlit
offHist [32]uint16 // offset codes
litHist [256]uint16 // codes 0->255
- n uint16 // Must be able to contain maxStoreBlockSize
+ nFilled int
+ n uint16 // Must be able to contain maxStoreBlockSize
tokens [maxStoreBlockSize + 1]token
}
@@ -142,7 +142,7 @@ func (t *tokens) Reset() {
return
}
t.n = 0
- t.nLits = 0
+ t.nFilled = 0
for i := range t.litHist[:] {
t.litHist[i] = 0
}
@@ -161,12 +161,12 @@ func (t *tokens) Fill() {
for i, v := range t.litHist[:] {
if v == 0 {
t.litHist[i] = 1
- t.nLits++
+ t.nFilled++
}
}
for i, v := range t.extraHist[:literalCount-256] {
if v == 0 {
- t.nLits++
+ t.nFilled++
t.extraHist[i] = 1
}
}
@@ -202,14 +202,12 @@ func emitLiteral(dst *tokens, lit []byte) {
dst.litHist[v]++
}
dst.n += uint16(len(lit))
- dst.nLits += len(lit)
}
func (t *tokens) AddLiteral(lit byte) {
t.tokens[t.n] = token(lit)
t.litHist[lit]++
t.n++
- t.nLits++
}
// from https://stackoverflow.com/a/28730362
@@ -230,8 +228,9 @@ func (t *tokens) EstimatedBits() int {
shannon := float32(0)
bits := int(0)
nMatches := 0
- if t.nLits > 0 {
- invTotal := 1.0 / float32(t.nLits)
+ total := int(t.n) + t.nFilled
+ if total > 0 {
+ invTotal := 1.0 / float32(total)
for _, v := range t.litHist[:] {
if v > 0 {
n := float32(v)
@@ -275,7 +274,6 @@ func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
}
oCode := offsetCode(xoffset)
xoffset |= oCode << 16
- t.nLits++
t.extraHist[lengthCodes1[uint8(xlength)]]++
t.offHist[oCode]++
@@ -301,7 +299,6 @@ func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
}
xlength -= xl
xl -= baseMatchLength
- t.nLits++
t.extraHist[lengthCodes1[uint8(xl)]]++
t.offHist[oc]++
t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
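With nLits gone, the entropy estimate divides by t.n + t.nFilled, i.e. the symbols actually counted plus the slots that Fill bumped to one. A small stand-alone version of the Shannon accumulation used by EstimatedBits, over a literal histogram only:

package main

import (
	"fmt"
	"math"
)

// estimatedBits returns a Shannon lower bound for coding the histogram,
// mirroring the "n * -log2(n*invTotal)" accumulation in the diff.
func estimatedBits(hist []uint16) float64 {
	total := 0
	for _, v := range hist {
		total += int(v)
	}
	if total == 0 {
		return 0
	}
	inv := 1.0 / float64(total)
	bits := 0.0
	for _, v := range hist {
		if v > 0 {
			n := float64(v)
			bits += n * -math.Log2(n*inv)
		}
	}
	return bits
}

func main() {
	// 256 literals, half 'a' and half 'b': roughly 1 bit per symbol.
	hist := make([]uint16, 256)
	hist['a'], hist['b'] = 128, 128
	fmt.Printf("%.1f bits\n", estimatedBits(hist)) // 256.0 bits
}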
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 9b7cc8e97..2a06bd1a7 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -20,7 +20,7 @@ type dEntrySingle struct {
// double-symbols decoding
type dEntryDouble struct {
- seq uint16
+ seq [4]byte
nBits uint8
len uint8
}
@@ -753,23 +753,21 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
br[stream2].fillFast()
val := br[stream].peekBitsFast(d.actualTableLog)
- v := single[val&tlMask]
- br[stream].advance(uint8(v.entry))
- buf[off+bufoff*stream] = uint8(v.entry >> 8)
-
val2 := br[stream2].peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask]
v2 := single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
+ buf[off+bufoff*stream] = uint8(v.entry >> 8)
buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
val = br[stream].peekBitsFast(d.actualTableLog)
- v = single[val&tlMask]
- br[stream].advance(uint8(v.entry))
- buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
-
val2 = br[stream2].peekBitsFast(d.actualTableLog)
+ v = single[val&tlMask]
v2 = single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
+ buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
}
@@ -780,23 +778,21 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
br[stream2].fillFast()
val := br[stream].peekBitsFast(d.actualTableLog)
- v := single[val&tlMask]
- br[stream].advance(uint8(v.entry))
- buf[off+bufoff*stream] = uint8(v.entry >> 8)
-
val2 := br[stream2].peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask]
v2 := single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
+ buf[off+bufoff*stream] = uint8(v.entry >> 8)
buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
val = br[stream].peekBitsFast(d.actualTableLog)
- v = single[val&tlMask]
- br[stream].advance(uint8(v.entry))
- buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
-
val2 = br[stream2].peekBitsFast(d.actualTableLog)
+ v = single[val&tlMask]
v2 = single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
+ buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
}
@@ -914,7 +910,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
out := dst
dstEvery := (dstSize + 3) / 4
- shift := (8 - d.actualTableLog) & 7
+ shift := (56 + (8 - d.actualTableLog)) & 63
const tlSize = 1 << 8
single := d.dt.single[:tlSize]
@@ -935,79 +931,91 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
// Interleave 2 decodes.
const stream = 0
const stream2 = 1
- br[stream].fillFast()
- br[stream2].fillFast()
-
- v := single[br[stream].peekByteFast()>>shift].entry
+ br1 := &br[stream]
+ br2 := &br[stream2]
+ br1.fillFast()
+ br2.fillFast()
+
+ v := single[uint8(br1.value>>shift)].entry
+ v2 := single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
buf[off+bufoff*stream] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 := single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
buf[off+bufoff*stream+1] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
buf[off+bufoff*stream+2] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
- buf[off+bufoff*stream+3] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
+ buf[off+bufoff*stream+3] = uint8(v >> 8)
}
{
const stream = 2
const stream2 = 3
- br[stream].fillFast()
- br[stream2].fillFast()
-
- v := single[br[stream].peekByteFast()>>shift].entry
+ br1 := &br[stream]
+ br2 := &br[stream2]
+ br1.fillFast()
+ br2.fillFast()
+
+ v := single[uint8(br1.value>>shift)].entry
+ v2 := single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
buf[off+bufoff*stream] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 := single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
buf[off+bufoff*stream+1] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
buf[off+bufoff*stream+2] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
-
- v = single[br[stream].peekByteFast()>>shift].entry
- buf[off+bufoff*stream+3] = uint8(v >> 8)
- br[stream].advance(uint8(v))
- v2 = single[br[stream2].peekByteFast()>>shift].entry
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
+ buf[off+bufoff*stream+3] = uint8(v >> 8)
}
off += 4
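The 8-bit decode path indexes the table with the top byte of the 64-bit bit buffer (shift = 56 + (8 - actualTableLog)) and advances by bumping bitsRead and shifting value left, which is exactly what advance() does but kept inline in the hot loop. A hypothetical stand-alone model of one decode step; bitReaderLite and the packed table entries are illustrative, and the uint8 index only works because the table log is at most 8 on this path.

package main

import "fmt"

// bitReaderLite models just the fields the inlined fast path touches.
type bitReaderLite struct {
	value    uint64 // bits are consumed from the top
	bitsRead uint8
}

// decodeOne reads one symbol from a table indexed by the top tableLog bits.
// Each entry packs the symbol in bits 8..15 and its code length in bits 0..7.
func decodeOne(br *bitReaderLite, table []uint16, tableLog uint8) byte {
	shift := (56 + (8 - tableLog)) & 63
	v := table[uint8(br.value>>shift)]
	br.bitsRead += uint8(v)
	br.value <<= v & 63
	return byte(v >> 8)
}

func main() {
	const tableLog = 1
	// Two-entry table for a 1-bit code: bit 0 -> 'A', bit 1 -> 'B'.
	table := []uint16{'A'<<8 | 1, 'B'<<8 | 1}
	br := &bitReaderLite{value: 0b1011 << 60}
	for i := 0; i < 4; i++ {
		fmt.Printf("%c", decodeOne(br, table, tableLog))
	}
	fmt.Println() // BABB
}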
@@ -1073,7 +1081,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
}
// Read value and increment offset.
- v := single[br.peekByteFast()>>shift].entry
+ v := single[uint8(br.value>>shift)].entry
nBits := uint8(v)
br.advance(nBits)
bitsLeft -= int(nBits)
@@ -1121,7 +1129,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
out := dst
dstEvery := (dstSize + 3) / 4
- const shift = 0
+ const shift = 56
const tlSize = 1 << 8
const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
@@ -1145,37 +1153,41 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
br[stream].fillFast()
br[stream2].fillFast()
- v := single[br[stream].peekByteFast()>>shift].entry
+ v := single[uint8(br[stream].value>>shift)].entry
+ v2 := single[uint8(br[stream2].value>>shift)].entry
+ br[stream].bitsRead += uint8(v)
+ br[stream].value <<= v & 63
+ br[stream2].bitsRead += uint8(v2)
+ br[stream2].value <<= v2 & 63
buf[off+bufoff*stream] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 := single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
+ v = single[uint8(br[stream].value>>shift)].entry
+ v2 = single[uint8(br[stream2].value>>shift)].entry
+ br[stream].bitsRead += uint8(v)
+ br[stream].value <<= v & 63
+ br[stream2].bitsRead += uint8(v2)
+ br[stream2].value <<= v2 & 63
buf[off+bufoff*stream+1] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
+ v = single[uint8(br[stream].value>>shift)].entry
+ v2 = single[uint8(br[stream2].value>>shift)].entry
+ br[stream].bitsRead += uint8(v)
+ br[stream].value <<= v & 63
+ br[stream2].bitsRead += uint8(v2)
+ br[stream2].value <<= v2 & 63
buf[off+bufoff*stream+2] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
+ v = single[uint8(br[stream].value>>shift)].entry
+ v2 = single[uint8(br[stream2].value>>shift)].entry
+ br[stream].bitsRead += uint8(v)
+ br[stream].value <<= v & 63
+ br[stream2].bitsRead += uint8(v2)
+ br[stream2].value <<= v2 & 63
buf[off+bufoff*stream+3] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
}
{
@@ -1184,37 +1196,41 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
br[stream].fillFast()
br[stream2].fillFast()
- v := single[br[stream].peekByteFast()>>shift].entry
+ v := single[uint8(br[stream].value>>shift)].entry
+ v2 := single[uint8(br[stream2].value>>shift)].entry
+ br[stream].bitsRead += uint8(v)
+ br[stream].value <<= v & 63
+ br[stream2].bitsRead += uint8(v2)
+ br[stream2].value <<= v2 & 63
buf[off+bufoff*stream] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 := single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
+ v = single[uint8(br[stream].value>>shift)].entry
+ v2 = single[uint8(br[stream2].value>>shift)].entry
+ br[stream].bitsRead += uint8(v)
+ br[stream].value <<= v & 63
+ br[stream2].bitsRead += uint8(v2)
+ br[stream2].value <<= v2 & 63
buf[off+bufoff*stream+1] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
+ v = single[uint8(br[stream].value>>shift)].entry
+ v2 = single[uint8(br[stream2].value>>shift)].entry
+ br[stream].bitsRead += uint8(v)
+ br[stream].value <<= v & 63
+ br[stream2].bitsRead += uint8(v2)
+ br[stream2].value <<= v2 & 63
buf[off+bufoff*stream+2] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
- v = single[br[stream].peekByteFast()>>shift].entry
+ v = single[uint8(br[stream].value>>shift)].entry
+ v2 = single[uint8(br[stream2].value>>shift)].entry
+ br[stream].bitsRead += uint8(v)
+ br[stream].value <<= v & 63
+ br[stream2].bitsRead += uint8(v2)
+ br[stream2].value <<= v2 & 63
buf[off+bufoff*stream+3] = uint8(v >> 8)
- br[stream].advance(uint8(v))
-
- v2 = single[br[stream2].peekByteFast()>>shift].entry
buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
- br[stream2].advance(uint8(v2))
}
off += 4
@@ -1280,7 +1296,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
}
// Read value and increment offset.
- v := single[br.peekByteFast()>>shift].entry
+ v := single[br.peekByteFast()].entry
nBits := uint8(v)
br.advance(nBits)
bitsLeft -= int(nBits)
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
index 854458537..753d17df6 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitreader.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -50,16 +50,23 @@ func (b *bitReader) getBits(n uint8) int {
if n == 0 /*|| b.bitsRead >= 64 */ {
return 0
}
- return b.getBitsFast(n)
+ return int(b.get32BitsFast(n))
}
-// getBitsFast requires that at least one bit is requested every time.
+// get32BitsFast requires that at least one bit is requested every time.
// There are no checks if the buffer is filled.
-func (b *bitReader) getBitsFast(n uint8) int {
+func (b *bitReader) get32BitsFast(n uint8) uint32 {
const regMask = 64 - 1
v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
b.bitsRead += n
- return int(v)
+ return v
+}
+
+func (b *bitReader) get16BitsFast(n uint8) uint16 {
+ const regMask = 64 - 1
+ v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+ b.bitsRead += n
+ return v
}
// fillFast() will make sure at least 32 bits are available.
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
index 303ae90f9..b36618285 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
@@ -38,7 +38,7 @@ func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
b.nBits += bits
}
-// addBits32NC will add up to 32 bits.
+// addBits32NC will add up to 31 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits32NC(value uint32, bits uint8) {
@@ -46,6 +46,26 @@ func (b *bitWriter) addBits32NC(value uint32, bits uint8) {
b.nBits += bits
}
+// addBits64NC will add up to 64 bits.
+// There must be space for 32 bits.
+func (b *bitWriter) addBits64NC(value uint64, bits uint8) {
+ if bits <= 31 {
+ b.addBits32Clean(uint32(value), bits)
+ return
+ }
+ b.addBits32Clean(uint32(value), 32)
+ b.flush32()
+ b.addBits32Clean(uint32(value>>32), bits-32)
+}
+
+// addBits32Clean will add up to 32 bits.
+// It will not check if there is space for them.
+// The input must not contain more bits than specified.
+func (b *bitWriter) addBits32Clean(value uint32, bits uint8) {
+ b.bitContainer |= uint64(value) << (b.nBits & 63)
+ b.nBits += bits
+}
+
// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
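addBits64NC splits anything wider than 31 bits into two addBits32Clean calls with a flush in between, so callers only ever need 32 bits of headroom. A minimal model of the same container logic, with a simplified flush32 that writes four whole bytes; the byte-slice output is an assumption for the sketch, not the package's writer.

package main

import "fmt"

type bitWriter struct {
	bitContainer uint64
	nBits        uint8
	out          []byte
}

func (b *bitWriter) addBits32Clean(value uint32, bits uint8) {
	b.bitContainer |= uint64(value) << (b.nBits & 63)
	b.nBits += bits
}

// flush32 moves four whole bytes out once at least 32 bits are pending.
func (b *bitWriter) flush32() {
	if b.nBits < 32 {
		return
	}
	b.out = append(b.out,
		byte(b.bitContainer),
		byte(b.bitContainer>>8),
		byte(b.bitContainer>>16),
		byte(b.bitContainer>>24))
	b.nBits -= 32
	b.bitContainer >>= 32
}

// addBits64NC mirrors the diff: values wider than 31 bits go in two halves.
func (b *bitWriter) addBits64NC(value uint64, bits uint8) {
	if bits <= 31 {
		b.addBits32Clean(uint32(value), bits)
		return
	}
	b.addBits32Clean(uint32(value), 32)
	b.flush32()
	b.addBits32Clean(uint32(value>>32), bits-32)
}

func main() {
	var w bitWriter
	w.addBits64NC(0x3_1234_5678, 34)
	fmt.Printf("pending=%d bits, container=%#x, out=%x\n", w.nBits, w.bitContainer, w.out)
}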
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 8a98c4562..dc587b2c9 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -76,12 +76,11 @@ type blockDec struct {
// Window size of the block.
WindowSize uint64
- history chan *history
- input chan struct{}
- result chan decodeOutput
- sequenceBuf []seq
- err error
- decWG sync.WaitGroup
+ history chan *history
+ input chan struct{}
+ result chan decodeOutput
+ err error
+ decWG sync.WaitGroup
// Frame to use for singlethreaded decoding.
// Should not be used by the decoder itself since parent may be another frame.
@@ -512,18 +511,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
in = in[3:]
}
- // Allocate sequences
- if cap(b.sequenceBuf) < nSeqs {
- if b.lowMem {
- b.sequenceBuf = make([]seq, nSeqs)
- } else {
- // Allocate max
- b.sequenceBuf = make([]seq, nSeqs, maxSequences)
- }
- } else {
- // Reuse buffer
- b.sequenceBuf = b.sequenceBuf[:nSeqs]
- }
+
var seqs = &sequenceDecs{}
if nSeqs > 0 {
if len(in) < 1 {
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
index 3df185ee4..12e8f6f0b 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -51,7 +51,7 @@ func (b *blockEnc) init() {
if cap(b.literals) < maxCompressedBlockSize {
b.literals = make([]byte, 0, maxCompressedBlockSize)
}
- const defSeqs = 200
+ const defSeqs = 2000
if cap(b.sequences) < defSeqs {
b.sequences = make([]seq, 0, defSeqs)
}
@@ -426,7 +426,7 @@ func fuzzFseEncoder(data []byte) int {
return 0
}
enc := fseEncoder{}
- hist := enc.Histogram()[:256]
+ hist := enc.Histogram()
maxSym := uint8(0)
for i, v := range data {
v = v & 63
@@ -722,52 +722,53 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB)
}
seq--
- if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 {
- // No need to flush (common)
- for seq >= 0 {
- s = b.sequences[seq]
- wr.flush32()
- llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
- // tabelog max is 8 for all.
- of.encode(ofB)
- ml.encode(mlB)
- ll.encode(llB)
- wr.flush32()
-
- // We checked that all can stay within 32 bits
- wr.addBits32NC(s.litLen, llB.outBits)
- wr.addBits32NC(s.matchLen, mlB.outBits)
- wr.addBits32NC(s.offset, ofB.outBits)
-
- if debugSequences {
- println("Encoded seq", seq, s)
- }
-
- seq--
- }
- } else {
- for seq >= 0 {
- s = b.sequences[seq]
- wr.flush32()
- llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
- // tabelog max is below 8 for each.
- of.encode(ofB)
- ml.encode(mlB)
- ll.encode(llB)
- wr.flush32()
-
- // ml+ll = max 32 bits total
- wr.addBits32NC(s.litLen, llB.outBits)
- wr.addBits32NC(s.matchLen, mlB.outBits)
- wr.flush32()
- wr.addBits32NC(s.offset, ofB.outBits)
-
- if debugSequences {
- println("Encoded seq", seq, s)
- }
-
- seq--
- }
+ // Store sequences in reverse...
+ for seq >= 0 {
+ s = b.sequences[seq]
+
+ ofB := ofTT[s.ofCode]
+ wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits.
+ //of.encode(ofB)
+ nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16
+ dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState)
+ wr.addBits16NC(of.state, uint8(nbBitsOut))
+ of.state = of.stateTable[dstState]
+
+ // Accumulate extra bits.
+ outBits := ofB.outBits & 31
+ extraBits := uint64(s.offset & bitMask32[outBits])
+ extraBitsN := outBits
+
+ mlB := mlTT[s.mlCode]
+ //ml.encode(mlB)
+ nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
+ dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
+ wr.addBits16NC(ml.state, uint8(nbBitsOut))
+ ml.state = ml.stateTable[dstState]
+
+ outBits = mlB.outBits & 31
+ extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
+ extraBitsN += outBits
+
+ llB := llTT[s.llCode]
+ //ll.encode(llB)
+ nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
+ dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
+ wr.addBits16NC(ll.state, uint8(nbBitsOut))
+ ll.state = ll.stateTable[dstState]
+
+ outBits = llB.outBits & 31
+ extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
+ extraBitsN += outBits
+
+ wr.flush32()
+ wr.addBits64NC(extraBits, extraBitsN)
+
+ if debugSequences {
+ println("Encoded seq", seq, s)
+ }
+
+ seq--
}
ml.flush(mlEnc.actualTableLog)
of.flush(ofEnc.actualTableLog)
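The inlined sequence loop spells out the FSE transition that of.encode/ml.encode/ll.encode used to hide: nbBits = (state + deltaNbBits) >> 16, the low nbBits of the current state are written out, and the next state comes from stateTable[state>>nbBits + deltaFindState]; the three extra-bit fields are then packed into a single addBits64NC call. A sketch of just the transition arithmetic, with made-up transform values rather than a real normalized table:

package main

import "fmt"

// symbolTransform mimics the per-symbol FSE data used in the diff.
type symbolTransform struct {
	deltaNbBits    uint32
	deltaFindState int32
}

// nextState returns the bits to emit from the current state and the index
// into the state table that yields the next state.
func nextState(state uint16, s symbolTransform) (outBits uint16, nbBits uint8, tableIdx int32) {
	n := (uint32(state) + s.deltaNbBits) >> 16
	outBits = state & (1<<n - 1) // the low n bits are written out
	tableIdx = int32(state>>(n&15)) + s.deltaFindState
	return outBits, uint8(n), tableIdx
}

func main() {
	// Hypothetical transform values; real ones come from FSE normalization.
	tr := symbolTransform{deltaNbBits: 5 << 16, deltaFindState: -8}
	out, n, idx := nextState(0x01a7, tr)
	fmt.Printf("emit %d bits (%#b), next-state index %d\n", n, out, idx)
}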
@@ -801,14 +802,13 @@ func (b *blockEnc) genCodes() {
// nothing to do
return
}
-
if len(b.sequences) > math.MaxUint16 {
panic("can only encode up to 64K sequences")
}
// No bounds checks after here:
- llH := b.coders.llEnc.Histogram()[:256]
- ofH := b.coders.ofEnc.Histogram()[:256]
- mlH := b.coders.mlEnc.Histogram()[:256]
+ llH := b.coders.llEnc.Histogram()
+ ofH := b.coders.ofEnc.Histogram()
+ mlH := b.coders.mlEnc.Histogram()
for i := range llH {
llH[i] = 0
}
@@ -820,7 +820,8 @@ func (b *blockEnc) genCodes() {
}
var llMax, ofMax, mlMax uint8
- for i, seq := range b.sequences {
+ for i := range b.sequences {
+ seq := &b.sequences[i]
v := llCode(seq.litLen)
seq.llCode = v
llH[v]++
@@ -844,7 +845,6 @@ func (b *blockEnc) genCodes() {
panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
}
}
- b.sequences[i] = seq
}
maxCount := func(a []uint32) int {
var max uint32
diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
index 69736e8d4..5022e71c8 100644
--- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go
+++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
@@ -5,6 +5,7 @@ package zstd
import (
"bytes"
+ "encoding/binary"
"errors"
"io"
)
@@ -15,18 +16,50 @@ const HeaderMaxSize = 14 + 3
// Header contains information about the first frame and block within that.
type Header struct {
- // Window Size the window of data to keep while decoding.
- // Will only be set if HasFCS is false.
- WindowSize uint64
+ // SingleSegment specifies whether the data is to be decompressed into a
+ // single contiguous memory segment.
+ // It implies that WindowSize is invalid and that FrameContentSize is valid.
+ SingleSegment bool
- // Frame content size.
- // Expected size of the entire frame.
- FrameContentSize uint64
+ // WindowSize is the window of data to keep while decoding.
+ // Will only be set if SingleSegment is false.
+ WindowSize uint64
// Dictionary ID.
// If 0, no dictionary.
DictionaryID uint32
+ // HasFCS specifies whether FrameContentSize has a valid value.
+ HasFCS bool
+
+ // FrameContentSize is the expected uncompressed size of the entire frame.
+ FrameContentSize uint64
+
+ // Skippable will be true if the frame is meant to be skipped.
+ // This implies that FirstBlock.OK is false.
+ Skippable bool
+
+ // SkippableID is the user-specific ID for the skippable frame.
+ // Valid values are between 0 and 15, inclusive.
+ SkippableID int
+
+ // SkippableSize is the length of the user data to skip following
+ // the header.
+ SkippableSize uint32
+
+ // HeaderSize is the raw size of the frame header.
+ //
+ // For normal frames, it includes the size of the magic number and
+ // the size of the header (per section 3.1.1.1).
+ // It does not include the size for any data blocks (section 3.1.1.2) nor
+ // the size for the trailing content checksum.
+ //
+ // For skippable frames, this counts the size of the magic number
+ // along with the size of the size field of the payload.
+ // It does not include the size of the skippable payload itself.
+ // The total frame size is the HeaderSize plus the SkippableSize.
+ HeaderSize int
+
// First block information.
FirstBlock struct {
// OK will be set if first block could be decoded.
@@ -51,17 +84,9 @@ type Header struct {
CompressedSize int
}
- // Skippable will be true if the frame is meant to be skipped.
- // No other information will be populated.
- Skippable bool
-
// If set there is a checksum present for the block content.
+ // The checksum field at the end is always 4 bytes long.
HasCheckSum bool
-
- // If this is true FrameContentSize will have a valid value
- HasFCS bool
-
- SingleSegment bool
}
// Decode the header from the beginning of the stream.
@@ -71,39 +96,46 @@ type Header struct {
// If there isn't enough input, io.ErrUnexpectedEOF is returned.
// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
func (h *Header) Decode(in []byte) error {
+ *h = Header{}
if len(in) < 4 {
return io.ErrUnexpectedEOF
}
+ h.HeaderSize += 4
b, in := in[:4], in[4:]
if !bytes.Equal(b, frameMagic) {
if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
return ErrMagicMismatch
}
- *h = Header{Skippable: true}
+ if len(in) < 4 {
+ return io.ErrUnexpectedEOF
+ }
+ h.HeaderSize += 4
+ h.Skippable = true
+ h.SkippableID = int(b[0] & 0xf)
+ h.SkippableSize = binary.LittleEndian.Uint32(in)
return nil
}
+
+ // Read Window_Descriptor
+ // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
if len(in) < 1 {
return io.ErrUnexpectedEOF
}
-
- // Clear output
- *h = Header{}
fhd, in := in[0], in[1:]
+ h.HeaderSize++
h.SingleSegment = fhd&(1<<5) != 0
h.HasCheckSum = fhd&(1<<2) != 0
-
if fhd&(1<<3) != 0 {
return errors.New("reserved bit set on frame header")
}
- // Read Window_Descriptor
- // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
if !h.SingleSegment {
if len(in) < 1 {
return io.ErrUnexpectedEOF
}
var wd byte
wd, in = in[0], in[1:]
+ h.HeaderSize++
windowLog := 10 + (wd >> 3)
windowBase := uint64(1) << windowLog
windowAdd := (windowBase / 8) * uint64(wd&0x7)
@@ -120,9 +152,7 @@ func (h *Header) Decode(in []byte) error {
return io.ErrUnexpectedEOF
}
b, in = in[:size], in[size:]
- if b == nil {
- return io.ErrUnexpectedEOF
- }
+ h.HeaderSize += int(size)
switch size {
case 1:
h.DictionaryID = uint32(b[0])
@@ -152,9 +182,7 @@ func (h *Header) Decode(in []byte) error {
return io.ErrUnexpectedEOF
}
b, in = in[:fcsSize], in[fcsSize:]
- if b == nil {
- return io.ErrUnexpectedEOF
- }
+ h.HeaderSize += int(fcsSize)
switch fcsSize {
case 1:
h.FrameContentSize = uint64(b[0])
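The reworked header decoder also reports skippable-frame metadata and the number of header bytes consumed. A short usage sketch against the exported API, feeding it a hand-built skippable frame (magic 0x184D2A50 plus a zero 4-byte payload size):

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Skippable frame: little-endian magic 0x184D2A50 followed by a 4-byte size.
	frame := []byte{0x50, 0x2a, 0x4d, 0x18, 0x00, 0x00, 0x00, 0x00}

	var h zstd.Header
	if err := h.Decode(frame); err != nil {
		fmt.Println("decode:", err)
		return
	}
	fmt.Println("skippable:", h.Skippable, "id:", h.SkippableID,
		"payload:", h.SkippableSize, "header bytes:", h.HeaderSize)
}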
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go
index 295cd602a..15ae8ee80 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_base.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -108,11 +108,6 @@ func (e *fastBase) UseBlock(enc *blockEnc) {
e.blk = enc
}
-func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 {
- // Extend the match to be as long as possible.
- return int32(matchLen(src[s:], src[t:]))
-}
-
func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
if debugAsserts {
if s < 0 {
@@ -131,9 +126,24 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
}
}
+ a := src[s:]
+ b := src[t:]
+ b = b[:len(a)]
+ end := int32((len(a) >> 3) << 3)
+ for i := int32(0); i < end; i += 8 {
+ if diff := load6432(a, i) ^ load6432(b, i); diff != 0 {
+ return i + int32(bits.TrailingZeros64(diff)>>3)
+ }
+ }
- // Extend the match to be as long as possible.
- return int32(matchLen(src[s:], src[t:]))
+ a = a[end:]
+ b = b[end:]
+ for i := range a {
+ if a[i] != b[i] {
+ return int32(i) + end
+ }
+ }
+ return int32(len(a)) + end
}
// Reset the encoding table.
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index f2502629b..5f08a2830 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -6,8 +6,6 @@ package zstd
import (
"fmt"
- "math"
- "math/bits"
)
const (
@@ -136,20 +134,7 @@ encodeLoop:
// Consider history as well.
var seq seq
var length int32
- // length = 4 + e.matchlen(s+6, repIndex+4, src)
- {
- a := src[s+6:]
- b := src[repIndex+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- length = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
-
+ length = 4 + e.matchlen(s+6, repIndex+4, src)
seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
@@ -236,20 +221,7 @@ encodeLoop:
}
// Extend the 4-byte match as long as possible.
- //l := e.matchlen(s+4, t+4, src) + 4
- var l int32
- {
- a := src[s+4:]
- b := src[t+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ l := e.matchlen(s+4, t+4, src) + 4
// Extend backwards
tMin := s - e.maxMatchOff
@@ -286,20 +258,7 @@ encodeLoop:
if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
- //l := 4 + e.matchlen(s+4, o2+4, src)
- var l int32
- {
- a := src[s+4:]
- b := src[o2+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ l := 4 + e.matchlen(s+4, o2+4, src)
// Store this, since we have it.
nextHash := hashLen(cv, hashLog, tableFastHashLen)
@@ -418,21 +377,7 @@ encodeLoop:
if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) {
// Consider history as well.
var seq seq
- // length := 4 + e.matchlen(s+6, repIndex+4, src)
- // length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:]))
- var length int32
- {
- a := src[s+6:]
- b := src[repIndex+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- length = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ length := 4 + e.matchlen(s+6, repIndex+4, src)
seq.matchLen = uint32(length - zstdMinMatch)
@@ -522,21 +467,7 @@ encodeLoop:
panic(fmt.Sprintf("t (%d) < 0 ", t))
}
// Extend the 4-byte match as long as possible.
- //l := e.matchlenNoHist(s+4, t+4, src) + 4
- // l := int32(matchLen(src[s+4:], src[t+4:])) + 4
- var l int32
- {
- a := src[s+4:]
- b := src[t+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ l := e.matchlen(s+4, t+4, src) + 4
// Extend backwards
tMin := s - e.maxMatchOff
@@ -573,21 +504,7 @@ encodeLoop:
if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) {
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
- //l := 4 + e.matchlenNoHist(s+4, o2+4, src)
- // l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
- var l int32
- {
- a := src[s+4:]
- b := src[o2+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ l := 4 + e.matchlen(s+4, o2+4, src)
// Store this, since we have it.
nextHash := hashLen(cv, hashLog, tableFastHashLen)
@@ -731,19 +648,7 @@ encodeLoop:
// Consider history as well.
var seq seq
var length int32
- // length = 4 + e.matchlen(s+6, repIndex+4, src)
- {
- a := src[s+6:]
- b := src[repIndex+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- length = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ length = 4 + e.matchlen(s+6, repIndex+4, src)
seq.matchLen = uint32(length - zstdMinMatch)
@@ -831,20 +736,7 @@ encodeLoop:
}
// Extend the 4-byte match as long as possible.
- //l := e.matchlen(s+4, t+4, src) + 4
- var l int32
- {
- a := src[s+4:]
- b := src[t+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ l := e.matchlen(s+4, t+4, src) + 4
// Extend backwards
tMin := s - e.maxMatchOff
@@ -881,20 +773,7 @@ encodeLoop:
if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
- //l := 4 + e.matchlen(s+4, o2+4, src)
- var l int32
- {
- a := src[s+4:]
- b := src[o2+4:]
- endI := len(a) & (math.MaxInt32 - 7)
- l = int32(endI) + 4
- for i := 0; i < endI; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
- break
- }
- }
- }
+ l := 4 + e.matchlen(s+4, o2+4, src)
// Store this, since we have it.
nextHash := hashLen(cv, hashLog, tableFastHashLen)
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index 7d29e1d68..5f2e1d020 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -24,6 +24,7 @@ type encoderOptions struct {
allLitEntropy bool
customWindow bool
customALEntropy bool
+ customBlockSize bool
lowMem bool
dict *dict
}
@@ -33,7 +34,7 @@ func (o *encoderOptions) setDefault() {
concurrent: runtime.GOMAXPROCS(0),
crc: true,
single: nil,
- blockSize: 1 << 16,
+ blockSize: maxCompressedBlockSize,
windowSize: 8 << 20,
level: SpeedDefault,
allLitEntropy: true,
@@ -106,6 +107,7 @@ func WithWindowSize(n int) EOption {
o.customWindow = true
if o.blockSize > o.windowSize {
o.blockSize = o.windowSize
+ o.customBlockSize = true
}
return nil
}
@@ -188,10 +190,9 @@ func EncoderLevelFromZstd(level int) EncoderLevel {
return SpeedDefault
case level >= 6 && level < 10:
return SpeedBetterCompression
- case level >= 10:
+ default:
return SpeedBestCompression
}
- return SpeedDefault
}
// String provides a string representation of the compression level.
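Folding the level >= 10 case into default removes the unreachable trailing return without changing the mapping. A quick check of the visible cases through the exported function; the printed names come from EncoderLevel's String method, and level 5 is assumed to hit the SpeedDefault case shown just above this hunk.

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// 7 and 15 follow the cases visible in this hunk; 5 maps to SpeedDefault.
	for _, lvl := range []int{5, 7, 15} {
		fmt.Println(lvl, "->", zstd.EncoderLevelFromZstd(lvl))
	}
}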
@@ -222,6 +223,9 @@ func WithEncoderLevel(l EncoderLevel) EOption {
switch o.level {
case SpeedFastest:
o.windowSize = 4 << 20
+ if !o.customBlockSize {
+ o.blockSize = 1 << 16
+ }
case SpeedDefault:
o.windowSize = 8 << 20
case SpeedBetterCompression:
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
index e6d3d49b3..bb3d4fd6c 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -379,7 +379,7 @@ func (s decSymbol) final() (int, uint8) {
// This can only be used if no symbols are 0 bits.
// At least tablelog bits must be available in the bit reader.
func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
- lowBits := uint16(br.getBitsFast(s.state.nbBits()))
+ lowBits := br.get16BitsFast(s.state.nbBits())
s.state = s.dt[s.state.newState()+lowBits]
return s.state.baseline(), s.state.addBits()
}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
index b4757ee3f..5442061b1 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -62,9 +62,8 @@ func (s symbolTransform) String() string {
// To indicate that you have populated the histogram call HistogramFinished
// with the value of the highest populated symbol, as well as the number of entries
// in the most populated entry. These are accepted at face value.
-// The returned slice will always be length 256.
-func (s *fseEncoder) Histogram() []uint32 {
- return s.count[:]
+func (s *fseEncoder) Histogram() *[256]uint32 {
+ return &s.count
}
// HistogramFinished can be called to indicate that the histogram has been populated.
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
index be8db5bf7..cea178561 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
@@ -1,6 +1,7 @@
// +build !appengine
// +build gc
// +build !purego
+// +build !noasm
#include "textflag.h"
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
new file mode 100644
index 000000000..4d64a17d6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
@@ -0,0 +1,186 @@
+// +build gc,!purego,!noasm
+
+#include "textflag.h"
+
+// Register allocation.
+#define digest R1
+#define h R2 // Return value.
+#define p R3 // Input pointer.
+#define len R4
+#define nblocks R5 // len / 32.
+#define prime1 R7
+#define prime2 R8
+#define prime3 R9
+#define prime4 R10
+#define prime5 R11
+#define v1 R12
+#define v2 R13
+#define v3 R14
+#define v4 R15
+#define x1 R20
+#define x2 R21
+#define x3 R22
+#define x4 R23
+
+#define round(acc, x) \
+ MADD prime2, acc, x, acc \
+ ROR $64-31, acc \
+ MUL prime1, acc \
+
+// x = round(0, x).
+#define round0(x) \
+ MUL prime2, x \
+ ROR $64-31, x \
+ MUL prime1, x \
+
+#define mergeRound(x) \
+ round0(x) \
+ EOR x, h \
+ MADD h, prime4, prime1, h \
+
+// Update v[1-4] with 32-byte blocks. Assumes len >= 32.
+#define blocksLoop() \
+ LSR $5, len, nblocks \
+ PCALIGN $16 \
+ loop: \
+ LDP.P 32(p), (x1, x2) \
+ round(v1, x1) \
+ LDP -16(p), (x3, x4) \
+ round(v2, x2) \
+ SUB $1, nblocks \
+ round(v3, x3) \
+ round(v4, x4) \
+ CBNZ nblocks, loop \
+
+// The primes are repeated here to ensure that they're stored
+// in a contiguous array, so we can load them with LDP.
+DATA primes<> +0(SB)/8, $11400714785074694791
+DATA primes<> +8(SB)/8, $14029467366897019727
+DATA primes<>+16(SB)/8, $1609587929392839161
+DATA primes<>+24(SB)/8, $9650029242287828579
+DATA primes<>+32(SB)/8, $2870177450012600261
+GLOBL primes<>(SB), NOPTR+RODATA, $40
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
+ LDP b_base+0(FP), (p, len)
+
+ LDP primes<> +0(SB), (prime1, prime2)
+ LDP primes<>+16(SB), (prime3, prime4)
+ MOVD primes<>+32(SB), prime5
+
+ CMP $32, len
+ CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 }
+ BLO afterLoop
+
+ ADD prime1, prime2, v1
+ MOVD prime2, v2
+ MOVD $0, v3
+ NEG prime1, v4
+
+ blocksLoop()
+
+ ROR $64-1, v1, x1
+ ROR $64-7, v2, x2
+ ADD x1, x2
+ ROR $64-12, v3, x3
+ ROR $64-18, v4, x4
+ ADD x3, x4
+ ADD x2, x4, h
+
+ mergeRound(v1)
+ mergeRound(v2)
+ mergeRound(v3)
+ mergeRound(v4)
+
+afterLoop:
+ ADD len, h
+
+ TBZ $4, len, try8
+ LDP.P 16(p), (x1, x2)
+
+ round0(x1)
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+ round0(x2)
+ ROR $64-27, h
+ EOR x2 @> 64-27, h
+ MADD h, prime4, prime1, h
+
+try8:
+ TBZ $3, len, try4
+ MOVD.P 8(p), x1
+
+ round0(x1)
+ ROR $64-27, h
+ EOR x1 @> 64-27, h
+ MADD h, prime4, prime1, h
+
+try4:
+ TBZ $2, len, try2
+ MOVWU.P 4(p), x2
+
+ MUL prime1, x2
+ ROR $64-23, h
+ EOR x2 @> 64-23, h
+ MADD h, prime3, prime2, h
+
+try2:
+ TBZ $1, len, try1
+ MOVHU.P 2(p), x3
+ AND $255, x3, x1
+ LSR $8, x3, x2
+
+ MUL prime5, x1
+ ROR $64-11, h
+ EOR x1 @> 64-11, h
+ MUL prime1, h
+
+ MUL prime5, x2
+ ROR $64-11, h
+ EOR x2 @> 64-11, h
+ MUL prime1, h
+
+try1:
+ TBZ $0, len, end
+ MOVBU (p), x4
+
+ MUL prime5, x4
+ ROR $64-11, h
+ EOR x4 @> 64-11, h
+ MUL prime1, h
+
+end:
+ EOR h >> 33, h
+ MUL prime2, h
+ EOR h >> 29, h
+ MUL prime3, h
+ EOR h >> 32, h
+
+ MOVD h, ret+24(FP)
+ RET
+
+// func writeBlocks(d *Digest, b []byte) int
+//
+// Assumes len(b) >= 32.
+TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40
+ LDP primes<>(SB), (prime1, prime2)
+
+ // Load state. Assume v[1-4] are stored contiguously.
+ MOVD d+0(FP), digest
+ LDP 0(digest), (v1, v2)
+ LDP 16(digest), (v3, v4)
+
+ LDP b_base+8(FP), (p, len)
+
+ blocksLoop()
+
+ // Store updated state.
+ STP (v1, v2), 0(digest)
+ STP (v3, v4), 16(digest)
+
+ BIC $31, len
+ MOVD len, ret+32(FP)
+ RET
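The new arm64 assembly backs the same two entry points as the amd64 version, Sum64 and writeBlocks. Because the package is internal to the zstd module, a usage sketch only makes sense in-package (for example in a test); the input below is arbitrary:

    // Dispatches to the arm64 assembly on arm64 builds without the
    // purego/noasm tags, and to the pure-Go fallback otherwise.
    sum := Sum64([]byte("hello, world"))
    _ = sum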
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
index 0ae847f75..1a1fac9c2 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
@@ -1,5 +1,9 @@
-//go:build !appengine && gc && !purego
-// +build !appengine,gc,!purego
+//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm
+// +build amd64 arm64
+// +build !appengine
+// +build gc
+// +build !purego
+// +build !noasm
package xxhash
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
index 1f52f296e..209cb4a99 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
@@ -1,5 +1,5 @@
-//go:build !amd64 || appengine || !gc || purego
-// +build !amd64 appengine !gc purego
+//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm
+// +build !amd64,!arm64 appengine !gc purego noasm
package xxhash
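With noasm added to both constraint sets, the pure-Go fallback in this file can be selected even on amd64/arm64 through the standard build-tag mechanism, e.g.:

    go build -tags noasm ./...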
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index 1dd39e63b..bc731e4cb 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -278,7 +278,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
mlState = mlTable[mlState.newState()&maxTableMask]
ofState = ofTable[ofState.newState()&maxTableMask]
} else {
- bits := br.getBitsFast(nBits)
+ bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
llState = llTable[(llState.newState()+lowBits)&maxTableMask]
@@ -326,7 +326,7 @@ func (s *sequenceDecs) updateAlt(br *bitReader) {
s.offsets.state.state = s.offsets.state.dt[c.newState()]
return
}
- bits := br.getBitsFast(nBits)
+ bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31))
s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits]
diff --git a/vendor/github.com/mtrmac/gpgme/go.mod b/vendor/github.com/mtrmac/gpgme/go.mod
deleted file mode 100644
index 3dd09c9fb..000000000
--- a/vendor/github.com/mtrmac/gpgme/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/mtrmac/gpgme
-
-go 1.11
diff --git a/vendor/github.com/mtrmac/gpgme/.appveyor.yml b/vendor/github.com/proglottis/gpgme/.appveyor.yml
index 2fdc09ab5..2fdc09ab5 100644
--- a/vendor/github.com/mtrmac/gpgme/.appveyor.yml
+++ b/vendor/github.com/proglottis/gpgme/.appveyor.yml
diff --git a/vendor/github.com/mtrmac/gpgme/.gitignore b/vendor/github.com/proglottis/gpgme/.gitignore
index 0210b26e0..0210b26e0 100644
--- a/vendor/github.com/mtrmac/gpgme/.gitignore
+++ b/vendor/github.com/proglottis/gpgme/.gitignore
diff --git a/vendor/github.com/mtrmac/gpgme/.travis.yml b/vendor/github.com/proglottis/gpgme/.travis.yml
index 619e33721..619e33721 100644
--- a/vendor/github.com/mtrmac/gpgme/.travis.yml
+++ b/vendor/github.com/proglottis/gpgme/.travis.yml
diff --git a/vendor/github.com/mtrmac/gpgme/LICENSE b/vendor/github.com/proglottis/gpgme/LICENSE
index 06d4ab773..06d4ab773 100644
--- a/vendor/github.com/mtrmac/gpgme/LICENSE
+++ b/vendor/github.com/proglottis/gpgme/LICENSE
diff --git a/vendor/github.com/mtrmac/gpgme/README.md b/vendor/github.com/proglottis/gpgme/README.md
index 4770b82a8..4770b82a8 100644
--- a/vendor/github.com/mtrmac/gpgme/README.md
+++ b/vendor/github.com/proglottis/gpgme/README.md
diff --git a/vendor/github.com/mtrmac/gpgme/callbacks.go b/vendor/github.com/proglottis/gpgme/callbacks.go
index d1dc610d4..d1dc610d4 100644
--- a/vendor/github.com/mtrmac/gpgme/callbacks.go
+++ b/vendor/github.com/proglottis/gpgme/callbacks.go
diff --git a/vendor/github.com/mtrmac/gpgme/data.go b/vendor/github.com/proglottis/gpgme/data.go
index eee32c032..eee32c032 100644
--- a/vendor/github.com/mtrmac/gpgme/data.go
+++ b/vendor/github.com/proglottis/gpgme/data.go
diff --git a/vendor/github.com/proglottis/gpgme/go.mod b/vendor/github.com/proglottis/gpgme/go.mod
new file mode 100644
index 000000000..5badc8e69
--- /dev/null
+++ b/vendor/github.com/proglottis/gpgme/go.mod
@@ -0,0 +1,3 @@
+module github.com/proglottis/gpgme
+
+go 1.11
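Together with the surrounding file renames, this moves the vendored module from github.com/mtrmac/gpgme to its upstream home at github.com/proglottis/gpgme. The package name is still gpgme, so consumers only change the import path:

    import (
        // was: "github.com/mtrmac/gpgme"
        "github.com/proglottis/gpgme"
    )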
diff --git a/vendor/github.com/mtrmac/gpgme/go_gpgme.c b/vendor/github.com/proglottis/gpgme/go_gpgme.c
index 00da3ab30..00da3ab30 100644
--- a/vendor/github.com/mtrmac/gpgme/go_gpgme.c
+++ b/vendor/github.com/proglottis/gpgme/go_gpgme.c
diff --git a/vendor/github.com/mtrmac/gpgme/go_gpgme.h b/vendor/github.com/proglottis/gpgme/go_gpgme.h
index d4826ab36..eb3a4ba88 100644
--- a/vendor/github.com/mtrmac/gpgme/go_gpgme.h
+++ b/vendor/github.com/proglottis/gpgme/go_gpgme.h
@@ -6,11 +6,6 @@
#include <gpgme.h>
-/* GPGME_VERSION_NUMBER was introduced in 1.4.0 */
-#if !defined(GPGME_VERSION_NUMBER) || GPGME_VERSION_NUMBER < 0x010402
-typedef off_t gpgme_off_t; /* Introduced in 1.4.2 */
-#endif
-
extern ssize_t gogpgme_readfunc(void *handle, void *buffer, size_t size);
extern ssize_t gogpgme_writefunc(void *handle, void *buffer, size_t size);
extern off_t gogpgme_seekfunc(void *handle, off_t offset, int whence);
diff --git a/vendor/github.com/mtrmac/gpgme/gpgme.go b/vendor/github.com/proglottis/gpgme/gpgme.go
index c19b9aebc..9833057a6 100644
--- a/vendor/github.com/mtrmac/gpgme/gpgme.go
+++ b/vendor/github.com/proglottis/gpgme/gpgme.go
@@ -53,13 +53,13 @@ const (
type PinEntryMode int
-// const ( // Unavailable in 1.3.2
-// PinEntryDefault PinEntryMode = C.GPGME_PINENTRY_MODE_DEFAULT
-// PinEntryAsk PinEntryMode = C.GPGME_PINENTRY_MODE_ASK
-// PinEntryCancel PinEntryMode = C.GPGME_PINENTRY_MODE_CANCEL
-// PinEntryError PinEntryMode = C.GPGME_PINENTRY_MODE_ERROR
-// PinEntryLoopback PinEntryMode = C.GPGME_PINENTRY_MODE_LOOPBACK
-// )
+const (
+ PinEntryDefault PinEntryMode = C.GPGME_PINENTRY_MODE_DEFAULT
+ PinEntryAsk PinEntryMode = C.GPGME_PINENTRY_MODE_ASK
+ PinEntryCancel PinEntryMode = C.GPGME_PINENTRY_MODE_CANCEL
+ PinEntryError PinEntryMode = C.GPGME_PINENTRY_MODE_ERROR
+ PinEntryLoopback PinEntryMode = C.GPGME_PINENTRY_MODE_LOOPBACK
+)
type EncryptFlag uint
@@ -348,19 +348,17 @@ func (c *Context) KeyListMode() KeyListMode {
return res
}
-// Unavailable in 1.3.2:
-// func (c *Context) SetPinEntryMode(m PinEntryMode) error {
-// err := handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m)))
-// runtime.KeepAlive(c)
-// return err
-// }
+func (c *Context) SetPinEntryMode(m PinEntryMode) error {
+ err := handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m)))
+ runtime.KeepAlive(c)
+ return err
+}
-// Unavailable in 1.3.2:
-// func (c *Context) PinEntryMode() PinEntryMode {
-// res := PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx))
-// runtime.KeepAlive(c)
-// return res
-// }
+func (c *Context) PinEntryMode() PinEntryMode {
+ res := PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx))
+ runtime.KeepAlive(c)
+ return res
+}
func (c *Context) SetCallback(callback Callback) error {
var err error
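Restoring the pinentry-mode API matters for callers that need loopback pinentry, i.e. supplying a passphrase programmatically instead of through an interactive pinentry program. A hedged sketch of how it might be used; gpgme.New is assumed from the rest of the package and is not part of this hunk:

    package main

    import (
        "log"

        "github.com/proglottis/gpgme"
    )

    func main() {
        ctx, err := gpgme.New()
        if err != nil {
            log.Fatal(err)
        }
        // Loopback pinentry routes passphrase requests back to the caller
        // (requires GnuPG configured with allow-loopback-pinentry).
        if err := ctx.SetPinEntryMode(gpgme.PinEntryLoopback); err != nil {
            log.Fatal(err)
        }
    }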
diff --git a/vendor/github.com/mtrmac/gpgme/unset_agent_info.go b/vendor/github.com/proglottis/gpgme/unset_agent_info.go
index 986aca59f..986aca59f 100644
--- a/vendor/github.com/mtrmac/gpgme/unset_agent_info.go
+++ b/vendor/github.com/proglottis/gpgme/unset_agent_info.go
diff --git a/vendor/github.com/mtrmac/gpgme/unset_agent_info_windows.go b/vendor/github.com/proglottis/gpgme/unset_agent_info_windows.go
index 431ec86d3..431ec86d3 100644
--- a/vendor/github.com/mtrmac/gpgme/unset_agent_info_windows.go
+++ b/vendor/github.com/proglottis/gpgme/unset_agent_info_windows.go
diff --git a/vendor/github.com/sylabs/sif/v2/LICENSE.md b/vendor/github.com/sylabs/sif/v2/LICENSE.md
new file mode 100644
index 000000000..30ea0e758
--- /dev/null
+++ b/vendor/github.com/sylabs/sif/v2/LICENSE.md
@@ -0,0 +1,29 @@
+# LICENSE
+
+Copyright (c) 2018-2021, Sylabs Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go
new file mode 100644
index 000000000..d7acbb694
--- /dev/null
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go
@@ -0,0 +1,69 @@
+// Copyright (c) 2021, Sylabs Inc. All rights reserved.
+// This software is licensed under a 3-clause BSD license. Please consult the
+// LICENSE file distributed with the sources of this project regarding your
+// rights to use or distribute this software.
+
+package sif
+
+var (
+ hdrArchUnknown archType = [...]byte{'0', '0', '\x00'}
+ hdrArch386 archType = [...]byte{'0', '1', '\x00'}
+ hdrArchAMD64 archType = [...]byte{'0', '2', '\x00'}
+ hdrArchARM archType = [...]byte{'0', '3', '\x00'}
+ hdrArchARM64 archType = [...]byte{'0', '4', '\x00'}
+ hdrArchPPC64 archType = [...]byte{'0', '5', '\x00'}
+ hdrArchPPC64le archType = [...]byte{'0', '6', '\x00'}
+ hdrArchMIPS archType = [...]byte{'0', '7', '\x00'}
+ hdrArchMIPSle archType = [...]byte{'0', '8', '\x00'}
+ hdrArchMIPS64 archType = [...]byte{'0', '9', '\x00'}
+ hdrArchMIPS64le archType = [...]byte{'1', '0', '\x00'}
+ hdrArchS390x archType = [...]byte{'1', '1', '\x00'}
+)
+
+type archType [3]byte
+
+// getSIFArch returns the archType corresponding to go runtime arch.
+func getSIFArch(arch string) archType {
+ archMap := map[string]archType{
+ "386": hdrArch386,
+ "amd64": hdrArchAMD64,
+ "arm": hdrArchARM,
+ "arm64": hdrArchARM64,
+ "ppc64": hdrArchPPC64,
+ "ppc64le": hdrArchPPC64le,
+ "mips": hdrArchMIPS,
+ "mipsle": hdrArchMIPSle,
+ "mips64": hdrArchMIPS64,
+ "mips64le": hdrArchMIPS64le,
+ "s390x": hdrArchS390x,
+ }
+
+ t, ok := archMap[arch]
+ if !ok {
+ return hdrArchUnknown
+ }
+ return t
+}
+
+// GoArch returns the go runtime arch corresponding to t.
+func (t archType) GoArch() string {
+ archMap := map[archType]string{
+ hdrArch386: "386",
+ hdrArchAMD64: "amd64",
+ hdrArchARM: "arm",
+ hdrArchARM64: "arm64",
+ hdrArchPPC64: "ppc64",
+ hdrArchPPC64le: "ppc64le",
+ hdrArchMIPS: "mips",
+ hdrArchMIPSle: "mipsle",
+ hdrArchMIPS64: "mips64",
+ hdrArchMIPS64le: "mips64le",
+ hdrArchS390x: "s390x",
+ }
+
+ arch, ok := archMap[t]
+ if !ok {
+ arch = "unknown"
+ }
+ return arch
+}
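getSIFArch and GoArch are unexported, so the mapping is only reachable from inside the package. A minimal in-package sketch of the round trip, with runtime.GOARCH as the usual input (assumes import "runtime"):

    t := getSIFArch(runtime.GOARCH) // e.g. hdrArchAMD64 on amd64
    if t == hdrArchUnknown {
        // Architectures outside the map fall back to the unknown value.
    }
    goArch := t.GoArch() // maps back to the Go runtime arch string
    _ = goArch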
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go
new file mode 100644
index 000000000..d706fb1a5
--- /dev/null
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go
@@ -0,0 +1,103 @@
+// Copyright (c) 2021, Sylabs Inc. All rights reserved.
+// This software is licensed under a 3-clause BSD license. Please consult the
+// LICENSE file distributed with the sources of this project regarding your
+// rights to use or distribute this software.
+
+package sif
+
+import (
+ "errors"
+ "io"
+)
+
+// A Buffer is a variable-sized buffer of bytes that implements the sif.ReadWriter interface. The
+// zero value for Buffer is an empty buffer ready to use.
+type Buffer struct {
+ buf []byte
+ pos int64
+}
+
+// NewBuffer creates and initializes a new Buffer using buf as its initial contents.
+func NewBuffer(buf []byte) *Buffer {
+ return &Buffer{buf: buf}
+}
+
+var errNegativeOffset = errors.New("negative offset")
+
+// ReadAt implements the io.ReaderAt interface.
+func (b *Buffer) ReadAt(p []byte, off int64) (n int, err error) {
+ if off < 0 {
+ return 0, errNegativeOffset
+ }
+
+ if off >= int64(len(b.buf)) {
+ return 0, io.EOF
+ }
+
+ n = copy(p, b.buf[off:])
+ if n < len(p) {
+ err = io.EOF
+ }
+ return n, err
+}
+
+var errNegativePosition = errors.New("negative position")
+
+// Write implements the io.Writer interface.
+func (b *Buffer) Write(p []byte) (n int, err error) {
+ if b.pos < 0 {
+ return 0, errNegativePosition
+ }
+
+ if have, need := int64(len(b.buf))-b.pos, int64(len(p)); have < need {
+ b.buf = append(b.buf, make([]byte, need-have)...)
+ }
+
+ n = copy(b.buf[b.pos:], p)
+ b.pos += int64(n)
+ return n, nil
+}
+
+var errInvalidWhence = errors.New("invalid whence")
+
+// Seek implements the io.Seeker interface.
+func (b *Buffer) Seek(offset int64, whence int) (int64, error) {
+ var abs int64
+
+ switch whence {
+ case io.SeekStart:
+ abs = offset
+ case io.SeekCurrent:
+ abs = b.pos + offset
+ case io.SeekEnd:
+ abs = int64(len(b.buf)) + offset
+ default:
+ return 0, errInvalidWhence
+ }
+
+ if abs < 0 {
+ return 0, errNegativePosition
+ }
+
+ b.pos = abs
+ return abs, nil
+}
+
+var errTruncateRange = errors.New("truncation out of range")
+
+// Truncate discards all but the first n bytes from the buffer.
+func (b *Buffer) Truncate(n int64) error {
+ if n < 0 || n > int64(len(b.buf)) {
+ return errTruncateRange
+ }
+
+ b.buf = b.buf[:n]
+ return nil
+}
+
+// Bytes returns the contents of the buffer. The slice is valid for use only until the next buffer
+// modification (that is, only until the next call to a method like ReadAt, Write, or Truncate).
+func (b *Buffer) Bytes() []byte { return b.buf }
+
+// Len returns the number of bytes in the buffer.
+func (b *Buffer) Len() int64 { return int64(len(b.buf)) }
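Buffer gives the package an in-memory ReadWriter, so a SIF image can be assembled without touching disk. A small sketch combining it with CreateContainer from create.go below (the deterministic option is chosen only to keep the output stable):

    package main

    import (
        "fmt"
        "log"

        "github.com/sylabs/sif/v2/pkg/sif"
    )

    func main() {
        buf := sif.NewBuffer(nil)

        f, err := sif.CreateContainer(buf, sif.OptCreateDeterministic())
        if err != nil {
            log.Fatal(err)
        }
        if err := f.UnloadContainer(); err != nil {
            log.Fatal(err)
        }

        // The raw SIF image now lives in buf.
        fmt.Println("image size:", buf.Len())
    }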
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
new file mode 100644
index 000000000..e65bdb747
--- /dev/null
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
@@ -0,0 +1,680 @@
+// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved.
+// Copyright (c) 2017, SingularityWare, LLC. All rights reserved.
+// Copyright (c) 2017, Yannick Cote <yhcote@gmail.com> All rights reserved.
+// This software is licensed under a 3-clause BSD license. Please consult the
+// LICENSE file distributed with the sources of this project regarding your
+// rights to use or distribute this software.
+
+package sif
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "time"
+
+ "github.com/google/uuid"
+)
+
+// nextAligned finds the next offset that satisfies alignment.
+func nextAligned(offset int64, alignment int) int64 {
+ align64 := uint64(alignment)
+ offset64 := uint64(offset)
+
+ if align64 != 0 && offset64%align64 != 0 {
+ offset64 = (offset64 & ^(align64 - 1)) + align64
+ }
+
+ return int64(offset64)
+}
+
+// writeDataObjectAt writes the data object described by di to ws, using time t, recording details
+// in d. The object is written at the first position that satisfies the alignment requirements
+// described by di following offsetUnaligned.
+func writeDataObjectAt(ws io.WriteSeeker, offsetUnaligned int64, di DescriptorInput, t time.Time, d *rawDescriptor) error { //nolint:lll
+ offset, err := ws.Seek(nextAligned(offsetUnaligned, di.opts.alignment), io.SeekStart)
+ if err != nil {
+ return err
+ }
+
+ n, err := io.Copy(ws, di.r)
+ if err != nil {
+ return err
+ }
+
+ if err := di.fillDescriptor(t, d); err != nil {
+ return err
+ }
+ d.Used = true
+ d.Offset = offset
+ d.Size = n
+ d.SizeWithPadding = offset - offsetUnaligned + n
+
+ return nil
+}
+
+var (
+ errInsufficientCapacity = errors.New("insufficient descriptor capacity to add data object(s) to image")
+ errPrimaryPartition = errors.New("image already contains a primary partition")
+)
+
+// writeDataObject writes the data object described by di to f, using time t, recording details in
+// the descriptor at index i.
+func (f *FileImage) writeDataObject(i int, di DescriptorInput, t time.Time) error {
+ if i >= len(f.rds) {
+ return errInsufficientCapacity
+ }
+
+ // If this is a primary partition, verify there isn't another primary partition, and update the
+ // architecture in the global header.
+ if p, ok := di.opts.extra.(partition); ok && p.Parttype == PartPrimSys {
+ if ds, err := f.GetDescriptors(WithPartitionType(PartPrimSys)); err == nil && len(ds) > 0 {
+ return errPrimaryPartition
+ }
+
+ f.h.Arch = p.Arch
+ }
+
+ d := &f.rds[i]
+ d.ID = uint32(i) + 1
+
+ if err := writeDataObjectAt(f.rw, f.h.DataOffset+f.h.DataSize, di, t, d); err != nil {
+ return err
+ }
+
+ // Update minimum object ID map.
+ if minID, ok := f.minIDs[d.GroupID]; !ok || d.ID < minID {
+ f.minIDs[d.GroupID] = d.ID
+ }
+
+ f.h.DescriptorsFree--
+ f.h.DataSize += d.SizeWithPadding
+
+ return nil
+}
+
+// writeDescriptors writes the descriptors in f to backing storage.
+func (f *FileImage) writeDescriptors() error {
+ if _, err := f.rw.Seek(f.h.DescriptorsOffset, io.SeekStart); err != nil {
+ return err
+ }
+
+ return binary.Write(f.rw, binary.LittleEndian, f.rds)
+}
+
+// writeHeader writes the global header in f to backing storage.
+func (f *FileImage) writeHeader() error {
+ if _, err := f.rw.Seek(0, io.SeekStart); err != nil {
+ return err
+ }
+
+ return binary.Write(f.rw, binary.LittleEndian, f.h)
+}
+
+// createOpts accumulates container creation options.
+type createOpts struct {
+ launchScript [hdrLaunchLen]byte
+ id uuid.UUID
+ descriptorsOffset int64
+ descriptorCapacity int64
+ dis []DescriptorInput
+ t time.Time
+ closeOnUnload bool
+}
+
+// CreateOpt are used to specify container creation options.
+type CreateOpt func(*createOpts) error
+
+var errLaunchScriptLen = errors.New("launch script too large")
+
+// OptCreateWithLaunchScript specifies s as the launch script.
+func OptCreateWithLaunchScript(s string) CreateOpt {
+ return func(co *createOpts) error {
+ b := []byte(s)
+
+ if len(b) >= len(co.launchScript) {
+ return errLaunchScriptLen
+ }
+
+ copy(co.launchScript[:], b)
+
+ return nil
+ }
+}
+
+// OptCreateDeterministic sets header/descriptor fields to values that support deterministic
+// creation of images.
+func OptCreateDeterministic() CreateOpt {
+ return func(co *createOpts) error {
+ co.id = uuid.Nil
+ co.t = time.Time{}
+ return nil
+ }
+}
+
+// OptCreateWithID specifies id as the unique ID.
+func OptCreateWithID(id string) CreateOpt {
+ return func(co *createOpts) error {
+ id, err := uuid.Parse(id)
+ co.id = id
+ return err
+ }
+}
+
+// OptCreateWithDescriptorCapacity specifies that the created image should have the capacity for a
+// maximum of n descriptors.
+func OptCreateWithDescriptorCapacity(n int64) CreateOpt {
+ return func(co *createOpts) error {
+ co.descriptorCapacity = n
+ return nil
+ }
+}
+
+// OptCreateWithDescriptors appends dis to the list of descriptors.
+func OptCreateWithDescriptors(dis ...DescriptorInput) CreateOpt {
+ return func(co *createOpts) error {
+ co.dis = append(co.dis, dis...)
+ return nil
+ }
+}
+
+// OptCreateWithTime specifies t as the image creation time.
+func OptCreateWithTime(t time.Time) CreateOpt {
+ return func(co *createOpts) error {
+ co.t = t
+ return nil
+ }
+}
+
+// OptCreateWithCloseOnUnload specifies whether the ReadWriter should be closed by UnloadContainer.
+// By default, the ReadWriter will be closed if it implements the io.Closer interface.
+func OptCreateWithCloseOnUnload(b bool) CreateOpt {
+ return func(co *createOpts) error {
+ co.closeOnUnload = b
+ return nil
+ }
+}
+
+// createContainer creates a new SIF container file in rw, according to opts.
+func createContainer(rw ReadWriter, co createOpts) (*FileImage, error) {
+ rds := make([]rawDescriptor, co.descriptorCapacity)
+ rdsSize := int64(binary.Size(rds))
+
+ h := header{
+ LaunchScript: co.launchScript,
+ Magic: hdrMagic,
+ Version: CurrentVersion.bytes(),
+ Arch: hdrArchUnknown,
+ ID: co.id,
+ CreatedAt: co.t.Unix(),
+ ModifiedAt: co.t.Unix(),
+ DescriptorsFree: co.descriptorCapacity,
+ DescriptorsTotal: co.descriptorCapacity,
+ DescriptorsOffset: co.descriptorsOffset,
+ DescriptorsSize: rdsSize,
+ DataOffset: co.descriptorsOffset + rdsSize,
+ }
+
+ f := &FileImage{
+ rw: rw,
+ h: h,
+ rds: rds,
+ minIDs: make(map[uint32]uint32),
+ }
+
+ for i, di := range co.dis {
+ if err := f.writeDataObject(i, di, co.t); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := f.writeDescriptors(); err != nil {
+ return nil, err
+ }
+
+ if err := f.writeHeader(); err != nil {
+ return nil, err
+ }
+
+ return f, nil
+}
+
+// CreateContainer creates a new SIF container in rw, according to opts. One or more data objects
+// can optionally be specified using OptCreateWithDescriptors.
+//
+// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources
+// are released. By default, UnloadContainer will close rw if it implements the io.Closer
+// interface. To change this behavior, consider using OptCreateWithCloseOnUnload.
+//
+// By default, the image ID is set to a randomly generated value. To override this, consider using
+// OptCreateDeterministic or OptCreateWithID.
+//
+// By default, the image creation time is set to time.Now(). To override this, consider using
+// OptCreateDeterministic or OptCreateWithTime.
+//
+// By default, the image will support a maximum of 48 descriptors. To change this, consider using
+// OptCreateWithDescriptorCapacity.
+//
+// A launch script can optionally be set using OptCreateWithLaunchScript.
+func CreateContainer(rw ReadWriter, opts ...CreateOpt) (*FileImage, error) {
+ id, err := uuid.NewRandom()
+ if err != nil {
+ return nil, err
+ }
+
+ co := createOpts{
+ id: id,
+ descriptorsOffset: 4096,
+ descriptorCapacity: 48,
+ t: time.Now(),
+ closeOnUnload: true,
+ }
+
+ for _, opt := range opts {
+ if err := opt(&co); err != nil {
+ return nil, fmt.Errorf("%w", err)
+ }
+ }
+
+ f, err := createContainer(rw, co)
+ if err != nil {
+ return nil, fmt.Errorf("%w", err)
+ }
+
+ f.closeOnUnload = co.closeOnUnload
+ return f, nil
+}
+
+// CreateContainerAtPath creates a new SIF container file at path, according to opts. One or more
+// data objects can optionally be specified using OptCreateWithDescriptors.
+//
+// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources
+// are released.
+//
+// By default, the image ID is set to a randomly generated value. To override this, consider using
+// OptCreateDeterministic or OptCreateWithID.
+//
+// By default, the image creation time is set to time.Now(). To override this, consider using
+// OptCreateDeterministic or OptCreateWithTime.
+//
+// By default, the image will support a maximum of 48 descriptors. To change this, consider using
+// OptCreateWithDescriptorCapacity.
+//
+// A launch script can optionally be set using OptCreateWithLaunchScript.
+func CreateContainerAtPath(path string, opts ...CreateOpt) (*FileImage, error) {
+ fp, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o755)
+ if err != nil {
+ return nil, fmt.Errorf("%w", err)
+ }
+
+ f, err := CreateContainer(fp, opts...)
+ if err != nil {
+ fp.Close()
+ os.Remove(fp.Name())
+
+ return nil, err
+ }
+
+ f.closeOnUnload = true
+ return f, nil
+}
+
+func zeroData(fimg *FileImage, descr *rawDescriptor) error {
+ // first, move to data object offset
+ if _, err := fimg.rw.Seek(descr.Offset, io.SeekStart); err != nil {
+ return err
+ }
+
+ var zero [4096]byte
+ n := descr.Size
+ upbound := int64(4096)
+ for {
+ if n < 4096 {
+ upbound = n
+ }
+
+ if _, err := fimg.rw.Write(zero[:upbound]); err != nil {
+ return err
+ }
+ n -= 4096
+ if n <= 0 {
+ break
+ }
+ }
+
+ return nil
+}
+
+func resetDescriptor(fimg *FileImage, index int) error {
+ // If we remove the primary partition, set the global header Arch field to HdrArchUnknown
+ // to indicate that the SIF file doesn't include a primary partition and no dependency
+ // on any architecture exists.
+ if fimg.rds[index].isPartitionOfType(PartPrimSys) {
+ fimg.h.Arch = hdrArchUnknown
+ }
+
+ offset := fimg.h.DescriptorsOffset + int64(index)*int64(binary.Size(fimg.rds[0]))
+
+ // first, move to descriptor offset
+ if _, err := fimg.rw.Seek(offset, io.SeekStart); err != nil {
+ return err
+ }
+
+ var emptyDesc rawDescriptor
+ return binary.Write(fimg.rw, binary.LittleEndian, emptyDesc)
+}
+
+// addOpts accumulates object add options.
+type addOpts struct {
+ t time.Time
+}
+
+// AddOpt are used to specify object add options.
+type AddOpt func(*addOpts) error
+
+// OptAddDeterministic sets header/descriptor fields to values that support deterministic
+// modification of images.
+func OptAddDeterministic() AddOpt {
+ return func(ao *addOpts) error {
+ ao.t = time.Time{}
+ return nil
+ }
+}
+
+// OptAddWithTime specifies t as the image modification time.
+func OptAddWithTime(t time.Time) AddOpt {
+ return func(ao *addOpts) error {
+ ao.t = t
+ return nil
+ }
+}
+
+// AddObject adds a new data object and its descriptor into the specified SIF file.
+//
+// By default, the image modification time is set to the current time. To override this, consider
+// using OptAddDeterministic or OptAddWithTime.
+func (f *FileImage) AddObject(di DescriptorInput, opts ...AddOpt) error {
+ ao := addOpts{
+ t: time.Now(),
+ }
+
+ for _, opt := range opts {
+ if err := opt(&ao); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+ }
+
+ // Find an unused descriptor.
+ i := 0
+ for _, rd := range f.rds {
+ if !rd.Used {
+ break
+ }
+ i++
+ }
+
+ if err := f.writeDataObject(i, di, ao.t); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ if err := f.writeDescriptors(); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ f.h.ModifiedAt = ao.t.Unix()
+
+ if err := f.writeHeader(); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ return nil
+}
+
+// isLast returns true if the data object associated with d is the last in f.
+func (f *FileImage) isLast(d *rawDescriptor) bool {
+ isLast := true
+
+ end := d.Offset + d.Size
+ f.WithDescriptors(func(d Descriptor) bool {
+ isLast = d.Offset()+d.Size() <= end
+ return !isLast
+ })
+
+ return isLast
+}
+
+// truncateAt truncates f at the start of the padded data object described by d.
+func (f *FileImage) truncateAt(d *rawDescriptor) error {
+ start := d.Offset + d.Size - d.SizeWithPadding
+
+ if err := f.rw.Truncate(start); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// deleteOpts accumulates object deletion options.
+type deleteOpts struct {
+ zero bool
+ compact bool
+ t time.Time
+}
+
+// DeleteOpt are used to specify object deletion options.
+type DeleteOpt func(*deleteOpts) error
+
+// OptDeleteZero specifies whether the deleted object should be zeroed.
+func OptDeleteZero(b bool) DeleteOpt {
+ return func(do *deleteOpts) error {
+ do.zero = b
+ return nil
+ }
+}
+
+// OptDeleteCompact specifies whether the image should be compacted following object deletion.
+func OptDeleteCompact(b bool) DeleteOpt {
+ return func(do *deleteOpts) error {
+ do.compact = b
+ return nil
+ }
+}
+
+// OptDeleteDeterministic sets header/descriptor fields to values that support deterministic
+// modification of images.
+func OptDeleteDeterministic() DeleteOpt {
+ return func(do *deleteOpts) error {
+ do.t = time.Time{}
+ return nil
+ }
+}
+
+// OptDeleteWithTime specifies t as the image modification time.
+func OptDeleteWithTime(t time.Time) DeleteOpt {
+ return func(do *deleteOpts) error {
+ do.t = t
+ return nil
+ }
+}
+
+var errCompactNotImplemented = errors.New("compact not implemented for non-last object")
+
+// DeleteObject deletes the data object with id, according to opts.
+//
+// To zero the data region of the deleted object, use OptDeleteZero. To compact the file following
+// object deletion, use OptDeleteCompact.
+//
+// By default, the image modification time is set to time.Now(). To override this, consider using
+// OptDeleteDeterministic or OptDeleteWithTime.
+func (f *FileImage) DeleteObject(id uint32, opts ...DeleteOpt) error {
+ do := deleteOpts{
+ t: time.Now(),
+ }
+
+ for _, opt := range opts {
+ if err := opt(&do); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+ }
+
+ d, err := f.getDescriptor(WithID(id))
+ if err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ if do.compact && !f.isLast(d) {
+ return fmt.Errorf("%w", errCompactNotImplemented)
+ }
+
+ if do.zero {
+ if err := zeroData(f, d); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+ }
+
+ if do.compact {
+ if err := f.truncateAt(d); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ f.h.DataSize -= d.SizeWithPadding
+ }
+
+ f.h.DescriptorsFree++
+ f.h.ModifiedAt = do.t.Unix()
+
+ index := 0
+ for i, od := range f.rds {
+ if od.ID == id {
+ index = i
+ break
+ }
+ }
+
+ if err := resetDescriptor(f, index); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ if err := f.writeHeader(); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ return nil
+}
+
+// setOpts accumulates object set options.
+type setOpts struct {
+ t time.Time
+}
+
+// SetOpt are used to specify object set options.
+type SetOpt func(*setOpts) error
+
+// OptSetDeterministic sets header/descriptor fields to values that support deterministic
+// modification of images.
+func OptSetDeterministic() SetOpt {
+ return func(so *setOpts) error {
+ so.t = time.Time{}
+ return nil
+ }
+}
+
+// OptSetWithTime specifies t as the image/object modification time.
+func OptSetWithTime(t time.Time) SetOpt {
+ return func(so *setOpts) error {
+ so.t = t
+ return nil
+ }
+}
+
+var (
+ errNotPartition = errors.New("data object not a partition")
+ errNotSystem = errors.New("data object not a system partition")
+)
+
+// SetPrimPart sets the specified system partition to be the primary one.
+//
+// By default, the image/object modification times are set to time.Now(). To override this,
+// consider using OptSetDeterministic or OptSetWithTime.
+func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error {
+ so := setOpts{
+ t: time.Now(),
+ }
+
+ for _, opt := range opts {
+ if err := opt(&so); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+ }
+
+ descr, err := f.getDescriptor(WithID(id))
+ if err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ if descr.DataType != DataPartition {
+ return fmt.Errorf("%w", errNotPartition)
+ }
+
+ fs, pt, arch, err := descr.getPartitionMetadata()
+ if err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ // if already primary system partition, nothing to do
+ if pt == PartPrimSys {
+ return nil
+ }
+
+ if pt != PartSystem {
+ return fmt.Errorf("%w", errNotSystem)
+ }
+
+ olddescr, err := f.getDescriptor(WithPartitionType(PartPrimSys))
+ if err != nil && !errors.Is(err, ErrObjectNotFound) {
+ return fmt.Errorf("%w", err)
+ }
+
+ f.h.Arch = getSIFArch(arch)
+
+ extra := partition{
+ Fstype: fs,
+ Parttype: PartPrimSys,
+ }
+ copy(extra.Arch[:], arch)
+
+ if err := descr.setExtra(extra); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ if olddescr != nil {
+ oldfs, _, oldarch, err := olddescr.getPartitionMetadata()
+ if err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ oldextra := partition{
+ Fstype: oldfs,
+ Parttype: PartSystem,
+ Arch: getSIFArch(oldarch),
+ }
+
+ if err := olddescr.setExtra(oldextra); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+ }
+
+ if err := f.writeDescriptors(); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ f.h.ModifiedAt = so.t.Unix()
+
+ if err := f.writeHeader(); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+
+ return nil
+}
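Pulling the creation API together, a hedged end-to-end sketch: create an image on disk with one primary system partition, then delete it again. sif.FsSquash is assumed to be the FSType constant for squashfs (defined elsewhere in the package, not in this diff); the remaining identifiers appear in this file or in descriptor_input.go:

    package main

    import (
        "log"
        "os"

        "github.com/sylabs/sif/v2/pkg/sif"
    )

    func main() {
        part, err := os.Open("rootfs.squashfs") // hypothetical input file
        if err != nil {
            log.Fatal(err)
        }
        defer part.Close()

        di, err := sif.NewDescriptorInput(sif.DataPartition, part,
            sif.OptPartitionMetadata(sif.FsSquash, sif.PartPrimSys, "amd64"),
        )
        if err != nil {
            log.Fatal(err)
        }

        f, err := sif.CreateContainerAtPath("image.sif", sif.OptCreateWithDescriptors(di))
        if err != nil {
            log.Fatal(err)
        }
        defer f.UnloadContainer()

        // Object IDs are assigned sequentially starting at 1, so the
        // partition can be zeroed and the file compacted by ID.
        if err := f.DeleteObject(1, sif.OptDeleteZero(true), sif.OptDeleteCompact(true)); err != nil {
            log.Fatal(err)
        }
    }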
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go
new file mode 100644
index 000000000..da7a6a7c7
--- /dev/null
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go
@@ -0,0 +1,267 @@
+// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved.
+// Copyright (c) 2017, SingularityWare, LLC. All rights reserved.
+// Copyright (c) 2017, Yannick Cote <yhcote@gmail.com> All rights reserved.
+// This software is licensed under a 3-clause BSD license. Please consult the
+// LICENSE file distributed with the sources of this project regarding your
+// rights to use or distribute this software.
+
+package sif
+
+import (
+ "bytes"
+ "crypto"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+)
+
+// rawDescriptor represents an on-disk object descriptor.
+type rawDescriptor struct {
+ DataType DataType
+ Used bool
+ ID uint32
+ GroupID uint32
+ LinkedID uint32
+ Offset int64
+ Size int64
+ SizeWithPadding int64
+
+ CreatedAt int64
+ ModifiedAt int64
+ UID int64 // Deprecated: UID exists for historical compatibility and should not be used.
+ GID int64 // Deprecated: GID exists for historical compatibility and should not be used.
+ Name [descrNameLen]byte
+ Extra [descrMaxPrivLen]byte
+}
+
+// partition represents the SIF partition data object descriptor.
+type partition struct {
+ Fstype FSType
+ Parttype PartType
+ Arch archType
+}
+
+// signature represents the SIF signature data object descriptor.
+type signature struct {
+ Hashtype hashType
+ Entity [descrEntityLen]byte
+}
+
+// cryptoMessage represents the SIF crypto message object descriptor.
+type cryptoMessage struct {
+ Formattype FormatType
+ Messagetype MessageType
+}
+
+var errNameTooLarge = errors.New("name value too large")
+
+// setName encodes name into the name field of d.
+func (d *rawDescriptor) setName(name string) error {
+ if len(name) > len(d.Name) {
+ return errNameTooLarge
+ }
+
+ for i := copy(d.Name[:], name); i < len(d.Name); i++ {
+ d.Name[i] = 0
+ }
+
+ return nil
+}
+
+var errExtraTooLarge = errors.New("extra value too large")
+
+// setExtra encodes v into the extra field of d.
+func (d *rawDescriptor) setExtra(v interface{}) error {
+ if v == nil {
+ return nil
+ }
+
+ if binary.Size(v) > len(d.Extra) {
+ return errExtraTooLarge
+ }
+
+ b := new(bytes.Buffer)
+ if err := binary.Write(b, binary.LittleEndian, v); err != nil {
+ return err
+ }
+
+ for i := copy(d.Extra[:], b.Bytes()); i < len(d.Extra); i++ {
+ d.Extra[i] = 0
+ }
+
+ return nil
+}
+
+// getPartitionMetadata gets metadata for a partition data object.
+func (d rawDescriptor) getPartitionMetadata() (fs FSType, pt PartType, arch string, err error) {
+ if got, want := d.DataType, DataPartition; got != want {
+ return 0, 0, "", &unexpectedDataTypeError{got, []DataType{want}}
+ }
+
+ var p partition
+
+ b := bytes.NewReader(d.Extra[:])
+ if err := binary.Read(b, binary.LittleEndian, &p); err != nil {
+ return 0, 0, "", fmt.Errorf("%w", err)
+ }
+
+ return p.Fstype, p.Parttype, p.Arch.GoArch(), nil
+}
+
+// isPartitionOfType returns true if d is a partition data object of type pt.
+func (d rawDescriptor) isPartitionOfType(pt PartType) bool {
+ _, t, _, err := d.getPartitionMetadata()
+ if err != nil {
+ return false
+ }
+ return t == pt
+}
+
+// Descriptor represents the SIF descriptor type.
+type Descriptor struct {
+ r io.ReaderAt // Backing storage.
+
+ raw rawDescriptor // Raw descriptor from image.
+
+ relativeID uint32 // ID relative to minimum ID of object group.
+}
+
+// DataType returns the type of data object.
+func (d Descriptor) DataType() DataType { return d.raw.DataType }
+
+// ID returns the data object ID of d.
+func (d Descriptor) ID() uint32 { return d.raw.ID }
+
+// GroupID returns the data object group ID of d, or zero if d is not part of a data object
+// group.
+func (d Descriptor) GroupID() uint32 { return d.raw.GroupID &^ descrGroupMask }
+
+// LinkedID returns the object/group ID d is linked to, or zero if d does not contain a linked
+// ID. If isGroup is true, the returned id is an object group ID. Otherwise, the returned id is a
+// data object ID.
+func (d Descriptor) LinkedID() (id uint32, isGroup bool) {
+ return d.raw.LinkedID &^ descrGroupMask, d.raw.LinkedID&descrGroupMask == descrGroupMask
+}
+
+// Offset returns the offset of the data object.
+func (d Descriptor) Offset() int64 { return d.raw.Offset }
+
+// Size returns the data object size.
+func (d Descriptor) Size() int64 { return d.raw.Size }
+
+// CreatedAt returns the creation time of the data object.
+func (d Descriptor) CreatedAt() time.Time { return time.Unix(d.raw.CreatedAt, 0) }
+
+// ModifiedAt returns the modification time of the data object.
+func (d Descriptor) ModifiedAt() time.Time { return time.Unix(d.raw.ModifiedAt, 0) }
+
+// Name returns the name of the data object.
+func (d Descriptor) Name() string { return strings.TrimRight(string(d.raw.Name[:]), "\000") }
+
+// PartitionMetadata gets metadata for a partition data object.
+func (d Descriptor) PartitionMetadata() (fs FSType, pt PartType, arch string, err error) {
+ return d.raw.getPartitionMetadata()
+}
+
+var errHashUnsupported = errors.New("hash algorithm unsupported")
+
+// getHashType converts ht into a crypto.Hash.
+func getHashType(ht hashType) (crypto.Hash, error) {
+ switch ht {
+ case hashSHA256:
+ return crypto.SHA256, nil
+ case hashSHA384:
+ return crypto.SHA384, nil
+ case hashSHA512:
+ return crypto.SHA512, nil
+ case hashBLAKE2S:
+ return crypto.BLAKE2s_256, nil
+ case hashBLAKE2B:
+ return crypto.BLAKE2b_256, nil
+ }
+ return 0, errHashUnsupported
+}
+
+// SignatureMetadata gets metadata for a signature data object.
+func (d Descriptor) SignatureMetadata() (ht crypto.Hash, fp []byte, err error) {
+ if got, want := d.raw.DataType, DataSignature; got != want {
+ return ht, fp, &unexpectedDataTypeError{got, []DataType{want}}
+ }
+
+ var s signature
+
+ b := bytes.NewReader(d.raw.Extra[:])
+ if err := binary.Read(b, binary.LittleEndian, &s); err != nil {
+ return ht, fp, fmt.Errorf("%w", err)
+ }
+
+ if ht, err = getHashType(s.Hashtype); err != nil {
+ return ht, fp, fmt.Errorf("%w", err)
+ }
+
+ fp = make([]byte, 20)
+ copy(fp, s.Entity[:])
+
+ return ht, fp, nil
+}
+
+// CryptoMessageMetadata gets metadata for a crypto message data object.
+func (d Descriptor) CryptoMessageMetadata() (FormatType, MessageType, error) {
+ if got, want := d.raw.DataType, DataCryptoMessage; got != want {
+ return 0, 0, &unexpectedDataTypeError{got, []DataType{want}}
+ }
+
+ var m cryptoMessage
+
+ b := bytes.NewReader(d.raw.Extra[:])
+ if err := binary.Read(b, binary.LittleEndian, &m); err != nil {
+ return 0, 0, fmt.Errorf("%w", err)
+ }
+
+ return m.Formattype, m.Messagetype, nil
+}
+
+// GetData returns the data object associated with descriptor d.
+func (d Descriptor) GetData() ([]byte, error) {
+ b := make([]byte, d.raw.Size)
+ if _, err := io.ReadFull(d.GetReader(), b); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+// GetReader returns an io.Reader that reads the data object associated with descriptor d.
+func (d Descriptor) GetReader() io.Reader {
+ return io.NewSectionReader(d.r, d.raw.Offset, d.raw.Size)
+}
+
+// GetIntegrityReader returns an io.Reader that reads the integrity-protected fields from d.
+func (d Descriptor) GetIntegrityReader() io.Reader {
+ fields := []interface{}{
+ d.raw.DataType,
+ d.raw.Used,
+ d.relativeID,
+ d.raw.LinkedID,
+ d.raw.Size,
+ d.raw.CreatedAt,
+ d.raw.UID,
+ d.raw.GID,
+ }
+
+ // Encode endian-sensitive fields.
+ data := bytes.Buffer{}
+ for _, f := range fields {
+ if err := binary.Write(&data, binary.LittleEndian, f); err != nil {
+ panic(err) // (*bytes.Buffer).Write() is documented as always returning a nil error.
+ }
+ }
+
+ return io.MultiReader(
+ &data,
+ bytes.NewReader(d.raw.Name[:]),
+ bytes.NewReader(d.raw.Extra[:]),
+ )
+}
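Descriptor is a read-only view over the raw on-disk record, so inspecting an image comes down to loading it and selecting descriptors. A short sketch against an existing image.sif, using only accessors defined above plus load.go and select.go below:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/sylabs/sif/v2/pkg/sif"
    )

    func main() {
        f, err := sif.LoadContainerFromPath("image.sif", sif.OptLoadWithFlag(os.O_RDONLY))
        if err != nil {
            log.Fatal(err)
        }
        defer f.UnloadContainer()

        d, err := f.GetDescriptor(sif.WithPartitionType(sif.PartPrimSys))
        if err != nil {
            log.Fatal(err)
        }

        fs, pt, arch, err := d.PartitionMetadata()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("object %d: %q, %d bytes, fs=%v part=%v arch=%s\n",
            d.ID(), d.Name(), d.Size(), fs, pt, arch)
    }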
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go
new file mode 100644
index 000000000..c55cf51f9
--- /dev/null
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go
@@ -0,0 +1,300 @@
+// Copyright (c) 2021, Sylabs Inc. All rights reserved.
+// This software is licensed under a 3-clause BSD license. Please consult the
+// LICENSE file distributed with the sources of this project regarding your
+// rights to use or distribute this software.
+
+package sif
+
+import (
+ "crypto"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "time"
+)
+
+// descriptorOpts accumulates data object options.
+type descriptorOpts struct {
+ groupID uint32
+ linkID uint32
+ alignment int
+ name string
+ extra interface{}
+ t time.Time
+}
+
+// DescriptorInputOpt are used to specify data object options.
+type DescriptorInputOpt func(DataType, *descriptorOpts) error
+
+// OptNoGroup specifies the data object is not contained within a data object group.
+func OptNoGroup() DescriptorInputOpt {
+ return func(_ DataType, opts *descriptorOpts) error {
+ opts.groupID = 0
+ return nil
+ }
+}
+
+// OptGroupID specifies groupID as data object group ID.
+func OptGroupID(groupID uint32) DescriptorInputOpt {
+ return func(_ DataType, opts *descriptorOpts) error {
+ if groupID == 0 {
+ return ErrInvalidGroupID
+ }
+ opts.groupID = groupID
+ return nil
+ }
+}
+
+// OptLinkedID specifies that the data object is linked to the data object with the specified ID.
+func OptLinkedID(id uint32) DescriptorInputOpt {
+ return func(_ DataType, opts *descriptorOpts) error {
+ if id == 0 {
+ return ErrInvalidObjectID
+ }
+ opts.linkID = id
+ return nil
+ }
+}
+
+// OptLinkedGroupID specifies that the data object is linked to the data object group with the
+// specified groupID.
+func OptLinkedGroupID(groupID uint32) DescriptorInputOpt {
+ return func(_ DataType, opts *descriptorOpts) error {
+ if groupID == 0 {
+ return ErrInvalidGroupID
+ }
+ opts.linkID = groupID | descrGroupMask
+ return nil
+ }
+}
+
+// OptObjectAlignment specifies n as the data alignment requirement.
+func OptObjectAlignment(n int) DescriptorInputOpt {
+ return func(_ DataType, opts *descriptorOpts) error {
+ opts.alignment = n
+ return nil
+ }
+}
+
+// OptObjectName specifies name as the data object name.
+func OptObjectName(name string) DescriptorInputOpt {
+ return func(_ DataType, opts *descriptorOpts) error {
+ opts.name = name
+ return nil
+ }
+}
+
+// OptObjectTime specifies t as the data object creation time.
+func OptObjectTime(t time.Time) DescriptorInputOpt {
+ return func(_ DataType, opts *descriptorOpts) error {
+ opts.t = t
+ return nil
+ }
+}
+
+type unexpectedDataTypeError struct {
+ got DataType
+ want []DataType
+}
+
+func (e *unexpectedDataTypeError) Error() string {
+ return fmt.Sprintf("unexpected data type %v, expected one of: %v", e.got, e.want)
+}
+
+func (e *unexpectedDataTypeError) Is(target error) bool {
+ //nolint:errorlint // don't compare wrapped errors in Is()
+ t, ok := target.(*unexpectedDataTypeError)
+ if !ok {
+ return false
+ }
+
+ if len(t.want) > 0 {
+ // Use a map to check that the "want" errors in e and t contain the same values, ignoring
+ // any ordering differences.
+ acc := make(map[DataType]int, len(t.want))
+
+ // Increment counter for each data type in e.
+ for _, dt := range e.want {
+ if _, ok := acc[dt]; !ok {
+ acc[dt] = 0
+ }
+ acc[dt]++
+ }
+
+		// Decrement counter for each data type in t.
+ for _, dt := range t.want {
+ if _, ok := acc[dt]; !ok {
+ return false
+ }
+ acc[dt]--
+ }
+
+ // If the "want" errors in e and t are equivalent, all counters should be zero.
+ for _, n := range acc {
+ if n != 0 {
+ return false
+ }
+ }
+ }
+
+ return (e.got == t.got || t.got == 0)
+}
+
+// OptCryptoMessageMetadata sets metadata for a crypto message data object. The format type is set
+// to ft, and the message type is set to mt.
+//
+// If this option is applied to a data object with an incompatible type, an error is returned.
+func OptCryptoMessageMetadata(ft FormatType, mt MessageType) DescriptorInputOpt {
+ return func(t DataType, opts *descriptorOpts) error {
+ if got, want := t, DataCryptoMessage; got != want {
+ return &unexpectedDataTypeError{got, []DataType{want}}
+ }
+
+ m := cryptoMessage{
+ Formattype: ft,
+ Messagetype: mt,
+ }
+
+ opts.extra = m
+ return nil
+ }
+}
+
+var errUnknownArchitcture = errors.New("unknown architecture")
+
+// OptPartitionMetadata sets metadata for a partition data object. The filesystem type is set to
+// fs, the partition type is set to pt, and the CPU architecture is set to arch. The value of arch
+// should be the architecture as represented by the Go runtime.
+//
+// If this option is applied to a data object with an incompatible type, an error is returned.
+func OptPartitionMetadata(fs FSType, pt PartType, arch string) DescriptorInputOpt {
+ return func(t DataType, opts *descriptorOpts) error {
+ if got, want := t, DataPartition; got != want {
+ return &unexpectedDataTypeError{got, []DataType{want}}
+ }
+
+ sifarch := getSIFArch(arch)
+ if sifarch == hdrArchUnknown {
+ return fmt.Errorf("%w: %v", errUnknownArchitcture, arch)
+ }
+
+ p := partition{
+ Fstype: fs,
+ Parttype: pt,
+ Arch: sifarch,
+ }
+
+ opts.extra = p
+ return nil
+ }
+}
+
+// sifHashType converts h into a HashType.
+func sifHashType(h crypto.Hash) hashType {
+ switch h {
+ case crypto.SHA256:
+ return hashSHA256
+ case crypto.SHA384:
+ return hashSHA384
+ case crypto.SHA512:
+ return hashSHA512
+ case crypto.BLAKE2s_256:
+ return hashBLAKE2S
+ case crypto.BLAKE2b_256:
+ return hashBLAKE2B
+ }
+ return 0
+}
+
+// OptSignatureMetadata sets metadata for a signature data object. The hash type is set to ht, and
+// the signing entity fingerprint is set to fp.
+//
+// If this option is applied to a data object with an incompatible type, an error is returned.
+func OptSignatureMetadata(ht crypto.Hash, fp []byte) DescriptorInputOpt {
+ return func(t DataType, opts *descriptorOpts) error {
+ if got, want := t, DataSignature; got != want {
+ return &unexpectedDataTypeError{got, []DataType{want}}
+ }
+
+ s := signature{
+ Hashtype: sifHashType(ht),
+ }
+ copy(s.Entity[:], fp)
+
+ opts.extra = s
+ return nil
+ }
+}
+
+// DescriptorInput describes a new data object.
+type DescriptorInput struct {
+ dt DataType
+ r io.Reader
+ opts descriptorOpts
+}
+
+// DefaultObjectGroup is the default group that data objects are placed in.
+const DefaultObjectGroup = 1
+
+// NewDescriptorInput returns a DescriptorInput representing a data object of type t, with contents
+// read from r, configured according to opts.
+//
+// It is possible (and often necessary) to store additional metadata related to certain types of
+// data objects. Consider supplying options such as OptCryptoMessageMetadata, OptPartitionMetadata,
+// and OptSignatureMetadata for this purpose.
+//
+// By default, the data object will be placed in the default data object group (1). To override
+// this behavior, use OptNoGroup or OptGroupID. To link this data object, use OptLinkedID or
+// OptLinkedGroupID.
+//
+// By default, the data object will be aligned according to the system's memory page size. To
+// override this behavior, consider using OptObjectAlignment.
+//
+// By default, no name is set for the data object. To set a name, use OptObjectName.
+//
+// When creating a new image, data object creation/modification times are set to the image creation
+// time. When modifying an existing image, the data object creation/modification time is set to the
+// image modification time. To override this behavior, consider using OptObjectTime.
+func NewDescriptorInput(t DataType, r io.Reader, opts ...DescriptorInputOpt) (DescriptorInput, error) {
+ dopts := descriptorOpts{
+ groupID: DefaultObjectGroup,
+ alignment: os.Getpagesize(),
+ }
+
+ for _, opt := range opts {
+ if err := opt(t, &dopts); err != nil {
+ return DescriptorInput{}, fmt.Errorf("%w", err)
+ }
+ }
+
+ di := DescriptorInput{
+ dt: t,
+ r: r,
+ opts: dopts,
+ }
+
+ return di, nil
+}
+
+// fillDescriptor fills d according to di. If di does not explicitly specify a time value, use t.
+func (di DescriptorInput) fillDescriptor(t time.Time, d *rawDescriptor) error {
+ d.DataType = di.dt
+ d.GroupID = di.opts.groupID | descrGroupMask
+ d.LinkedID = di.opts.linkID
+
+ if !di.opts.t.IsZero() {
+ t = di.opts.t
+ }
+ d.CreatedAt = t.Unix()
+ d.ModifiedAt = t.Unix()
+
+ d.UID = 0
+ d.GID = 0
+
+ if err := d.setName(di.opts.name); err != nil {
+ return err
+ }
+
+ return d.setExtra(di.opts.extra)
+}
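NewDescriptorInput is also the entry point for adding objects to an existing image via AddObject in create.go. A fragment-level sketch; f is assumed to be a *sif.FileImage opened read-write, and sif.DataGenericJSON is assumed to be the data type constant for JSON blobs (it is not part of this diff):

    di, err := sif.NewDescriptorInput(sif.DataGenericJSON, strings.NewReader(`{"k":"v"}`),
        sif.OptObjectName("labels.json"),
        sif.OptGroupID(sif.DefaultObjectGroup),
    )
    if err != nil {
        log.Fatal(err)
    }
    if err := f.AddObject(di); err != nil {
        log.Fatal(err)
    }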
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go
new file mode 100644
index 000000000..75266e194
--- /dev/null
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/load.go
@@ -0,0 +1,174 @@
+// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved.
+// Copyright (c) 2017, SingularityWare, LLC. All rights reserved.
+// Copyright (c) 2017, Yannick Cote <yhcote@gmail.com> All rights reserved.
+// This software is licensed under a 3-clause BSD license. Please consult the
+// LICENSE file distributed with the sources of this project regarding your
+// rights to use or distribute this software.
+
+package sif
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+)
+
+var (
+ errInvalidMagic = errors.New("invalid SIF magic")
+ errIncompatibleVersion = errors.New("incompatible SIF version")
+)
+
+// isValidSif looks at key fields from the global header to assess SIF validity.
+func isValidSif(f *FileImage) error {
+ if f.h.Magic != hdrMagic {
+ return errInvalidMagic
+ }
+
+ if f.h.Version != CurrentVersion.bytes() {
+ return errIncompatibleVersion
+ }
+
+ return nil
+}
+
+// populateMinIDs populates the minIDs field of f.
+func (f *FileImage) populateMinIDs() {
+ f.minIDs = make(map[uint32]uint32)
+ f.WithDescriptors(func(d Descriptor) bool {
+ if minID, ok := f.minIDs[d.raw.GroupID]; !ok || d.ID() < minID {
+ f.minIDs[d.raw.GroupID] = d.ID()
+ }
+ return false
+ })
+}
+
+// loadContainer loads a SIF image from rw.
+func loadContainer(rw ReadWriter) (*FileImage, error) {
+ f := FileImage{rw: rw}
+
+ // Read global header.
+ err := binary.Read(
+ io.NewSectionReader(rw, 0, int64(binary.Size(f.h))),
+ binary.LittleEndian,
+ &f.h,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("reading global header: %w", err)
+ }
+
+ if err := isValidSif(&f); err != nil {
+ return nil, err
+ }
+
+ // Read descriptors.
+ f.rds = make([]rawDescriptor, f.h.DescriptorsTotal)
+ err = binary.Read(
+ io.NewSectionReader(rw, f.h.DescriptorsOffset, f.h.DescriptorsSize),
+ binary.LittleEndian,
+ &f.rds,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("reading descriptors: %w", err)
+ }
+
+ f.populateMinIDs()
+
+ return &f, nil
+}
+
+// loadOpts accumulates container loading options.
+type loadOpts struct {
+ flag int
+ closeOnUnload bool
+}
+
+// LoadOpt are used to specify container loading options.
+type LoadOpt func(*loadOpts) error
+
+// OptLoadWithFlag specifies flag (os.O_RDONLY etc.) to be used when opening the container file.
+func OptLoadWithFlag(flag int) LoadOpt {
+ return func(lo *loadOpts) error {
+ lo.flag = flag
+ return nil
+ }
+}
+
+// OptLoadWithCloseOnUnload specifies whether the ReadWriter should be closed by UnloadContainer.
+// By default, the ReadWriter will be closed if it implements the io.Closer interface.
+func OptLoadWithCloseOnUnload(b bool) LoadOpt {
+ return func(lo *loadOpts) error {
+ lo.closeOnUnload = b
+ return nil
+ }
+}
+
+// LoadContainerFromPath loads a new SIF container from path, according to opts.
+//
+// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources
+// are released.
+//
+// By default, the file is opened for read and write access. To change this behavior, consider
+// using OptLoadWithFlag.
+func LoadContainerFromPath(path string, opts ...LoadOpt) (*FileImage, error) {
+ lo := loadOpts{
+ flag: os.O_RDWR,
+ }
+
+ for _, opt := range opts {
+ if err := opt(&lo); err != nil {
+ return nil, fmt.Errorf("%w", err)
+ }
+ }
+
+ fp, err := os.OpenFile(path, lo.flag, 0)
+ if err != nil {
+ return nil, fmt.Errorf("%w", err)
+ }
+
+ f, err := loadContainer(fp)
+ if err != nil {
+ fp.Close()
+
+ return nil, fmt.Errorf("%w", err)
+ }
+
+ f.closeOnUnload = true
+ return f, nil
+}
+
+// LoadContainer loads a new SIF container from rw, according to opts.
+//
+// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources
+// are released. By default, UnloadContainer will close rw if it implements the io.Closer
+// interface. To change this behavior, consider using OptLoadWithCloseOnUnload.
+func LoadContainer(rw ReadWriter, opts ...LoadOpt) (*FileImage, error) {
+ lo := loadOpts{
+ closeOnUnload: true,
+ }
+
+ for _, opt := range opts {
+ if err := opt(&lo); err != nil {
+ return nil, fmt.Errorf("%w", err)
+ }
+ }
+
+ f, err := loadContainer(rw)
+ if err != nil {
+ return nil, fmt.Errorf("%w", err)
+ }
+
+ f.closeOnUnload = lo.closeOnUnload
+ return f, nil
+}
+
+// UnloadContainer unloads f, releasing associated resources.
+func (f *FileImage) UnloadContainer() error {
+ if c, ok := f.rw.(io.Closer); ok && f.closeOnUnload {
+ if err := c.Close(); err != nil {
+ return fmt.Errorf("%w", err)
+ }
+ }
+ return nil
+}
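LoadContainer accepts any ReadWriter, which pairs naturally with the in-memory Buffer from buffer.go. A short sketch; buf is assumed to already hold a complete SIF image (for example one produced by CreateContainer with a *sif.Buffer):

    f, err := sif.LoadContainer(buf, sif.OptLoadWithCloseOnUnload(false))
    if err != nil {
        log.Fatal(err)
    }
    defer f.UnloadContainer()

    fmt.Println("descriptor capacity:", f.DescriptorsTotal())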
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go
new file mode 100644
index 000000000..635d6e89c
--- /dev/null
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go
@@ -0,0 +1,210 @@
+// Copyright (c) 2021, Sylabs Inc. All rights reserved.
+// This software is licensed under a 3-clause BSD license. Please consult the
+// LICENSE file distributed with the sources of this project regarding your
+// rights to use or distribute this software.
+
+package sif
+
+import (
+ "errors"
+ "fmt"
+)
+
+// ErrNoObjects is the error returned when an image contains no data objects.
+var ErrNoObjects = errors.New("no objects in image")
+
+// ErrObjectNotFound is the error returned when a data object is not found.
+var ErrObjectNotFound = errors.New("object not found")
+
+// ErrMultipleObjectsFound is the error returned when multiple data objects are found.
+var ErrMultipleObjectsFound = errors.New("multiple objects found")
+
+// ErrInvalidObjectID is the error returned when an invalid object ID is supplied.
+var ErrInvalidObjectID = errors.New("invalid object ID")
+
+// ErrInvalidGroupID is the error returned when an invalid group ID is supplied.
+var ErrInvalidGroupID = errors.New("invalid group ID")
+
+// DescriptorSelectorFunc returns true if d matches, and false otherwise.
+type DescriptorSelectorFunc func(d Descriptor) (bool, error)
+
+// WithDataType selects descriptors that have data type dt.
+func WithDataType(dt DataType) DescriptorSelectorFunc {
+ return func(d Descriptor) (bool, error) {
+ return d.DataType() == dt, nil
+ }
+}
+
+// WithID selects descriptors with a matching ID.
+func WithID(id uint32) DescriptorSelectorFunc {
+ return func(d Descriptor) (bool, error) {
+ if id == 0 {
+ return false, ErrInvalidObjectID
+ }
+ return d.ID() == id, nil
+ }
+}
+
+// WithNoGroup selects descriptors that are not contained within an object group.
+func WithNoGroup() DescriptorSelectorFunc {
+ return func(d Descriptor) (bool, error) {
+ return d.GroupID() == 0, nil
+ }
+}
+
+// WithGroupID selects descriptors with a matching group ID.
+func WithGroupID(groupID uint32) DescriptorSelectorFunc {
+ return func(d Descriptor) (bool, error) {
+ if groupID == 0 {
+ return false, ErrInvalidGroupID
+ }
+ return d.GroupID() == groupID, nil
+ }
+}
+
+// WithLinkedID selects descriptors that are linked to the data object with the specified ID.
+func WithLinkedID(id uint32) DescriptorSelectorFunc {
+ return func(d Descriptor) (bool, error) {
+ if id == 0 {
+ return false, ErrInvalidObjectID
+ }
+ linkedID, isGroup := d.LinkedID()
+ return !isGroup && linkedID == id, nil
+ }
+}
+
+// WithLinkedGroupID selects descriptors that are linked to the data object group with the
+// specified ID.
+func WithLinkedGroupID(groupID uint32) DescriptorSelectorFunc {
+ return func(d Descriptor) (bool, error) {
+ if groupID == 0 {
+ return false, ErrInvalidGroupID
+ }
+ linkedID, isGroup := d.LinkedID()
+ return isGroup && linkedID == groupID, nil
+ }
+}
+
+// WithPartitionType selects descriptors containing a partition of type pt.
+func WithPartitionType(pt PartType) DescriptorSelectorFunc {
+ return func(d Descriptor) (bool, error) {
+ return d.raw.isPartitionOfType(pt), nil
+ }
+}
+
+// descriptorFromRaw populates a Descriptor from rd.
+func (f *FileImage) descriptorFromRaw(rd *rawDescriptor) Descriptor {
+ return Descriptor{
+ raw: *rd,
+ r: f.rw,
+ relativeID: rd.ID - f.minIDs[rd.GroupID],
+ }
+}
+
+// GetDescriptors returns a slice of in-use descriptors for which all selector funcs return true.
+// If the image contains no data objects, an error wrapping ErrNoObjects is returned.
+func (f *FileImage) GetDescriptors(fns ...DescriptorSelectorFunc) ([]Descriptor, error) {
+ if f.DescriptorsFree() == f.DescriptorsTotal() {
+ return nil, fmt.Errorf("%w", ErrNoObjects)
+ }
+
+ var ds []Descriptor
+
+ err := f.withDescriptors(multiSelectorFunc(fns...), func(d *rawDescriptor) error {
+ ds = append(ds, f.descriptorFromRaw(d))
+ return nil
+ })
+ if err != nil {
+ return nil, fmt.Errorf("%w", err)
+ }
+
+ return ds, nil
+}
+
+// getDescriptor returns a pointer to the in-use descriptor selected by fns. If no descriptor is
+// selected by fns, ErrObjectNotFound is returned. If multiple descriptors are selected by fns,
+// ErrMultipleObjectsFound is returned.
+func (f *FileImage) getDescriptor(fns ...DescriptorSelectorFunc) (*rawDescriptor, error) {
+ var d *rawDescriptor
+
+ err := f.withDescriptors(multiSelectorFunc(fns...), func(found *rawDescriptor) error {
+ if d != nil {
+ return ErrMultipleObjectsFound
+ }
+ d = found
+ return nil
+ })
+
+ if err == nil && d == nil {
+ err = ErrObjectNotFound
+ }
+
+ return d, err
+}
+
+// GetDescriptor returns the in-use descriptor selected by fns. If the image contains no data
+// objects, an error wrapping ErrNoObjects is returned. If no descriptor is selected by fns, an
+// error wrapping ErrObjectNotFound is returned. If multiple descriptors are selected by fns, an
+// error wrapping ErrMultipleObjectsFound is returned.
+func (f *FileImage) GetDescriptor(fns ...DescriptorSelectorFunc) (Descriptor, error) {
+ if f.DescriptorsFree() == f.DescriptorsTotal() {
+ return Descriptor{}, fmt.Errorf("%w", ErrNoObjects)
+ }
+
+ d, err := f.getDescriptor(fns...)
+ if err != nil {
+ return Descriptor{}, fmt.Errorf("%w", err)
+ }
+
+ return f.descriptorFromRaw(d), nil
+}
+
+// multiSelectorFunc returns a DescriptorSelectorFunc that selects a descriptor iff all of fns
+// select the descriptor.
+func multiSelectorFunc(fns ...DescriptorSelectorFunc) DescriptorSelectorFunc {
+ return func(d Descriptor) (bool, error) {
+ for _, fn := range fns {
+ if ok, err := fn(d); !ok || err != nil {
+ return ok, err
+ }
+ }
+ return true, nil
+ }
+}
+
+// withDescriptors calls onMatchFn with each in-use descriptor in f for which selectFn returns
+// true. If selectFn or onMatchFn return a non-nil error, the iteration halts, and the error is
+// returned to the caller.
+func (f *FileImage) withDescriptors(selectFn DescriptorSelectorFunc, onMatchFn func(*rawDescriptor) error) error {
+ for i, d := range f.rds {
+ if !d.Used {
+ continue
+ }
+
+ if ok, err := selectFn(f.descriptorFromRaw(&f.rds[i])); err != nil {
+ return err
+ } else if !ok {
+ continue
+ }
+
+ if err := onMatchFn(&f.rds[i]); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+var errAbort = errors.New("abort")
+
+// abortOnMatch is an onMatchFn that performs no work and always returns a non-nil sentinel
+// error, causing withDescriptors to stop iterating after the first match.
+func abortOnMatch(*rawDescriptor) error { return errAbort }
+
+// WithDescriptors calls fn with each in-use descriptor in f, until fn returns true.
+func (f *FileImage) WithDescriptors(fn func(d Descriptor) bool) {
+ selectFn := func(d Descriptor) (bool, error) {
+ return fn(d), nil
+ }
+ _ = f.withDescriptors(selectFn, abortOnMatch)
+}
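
For orientation, a sketch of how the selector functions above combine with GetDescriptor and GetDescriptors. The package and function names here are illustrative only, and f is assumed to be a *sif.FileImage loaded as in the previous sketch; error handling is abbreviated.

package sifexample

import (
	"log"

	"github.com/sylabs/sif/v2/pkg/sif"
)

// inspectObjects looks up the primary system partition and counts signature objects.
func inspectObjects(f *sif.FileImage) {
	part, err := f.GetDescriptor(sif.WithPartitionType(sif.PartPrimSys))
	if err != nil {
		log.Fatal(err)
	}
	log.Println("primary partition: object", part.ID())

	sigs, err := f.GetDescriptors(sif.WithDataType(sif.DataSignature))
	if err != nil {
		log.Fatal(err)
	}
	log.Println(len(sigs), "signature object(s) in image")
}
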
diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go
new file mode 100644
index 000000000..704acee4a
--- /dev/null
+++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go
@@ -0,0 +1,364 @@
+// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved.
+// Copyright (c) 2017, SingularityWare, LLC. All rights reserved.
+// Copyright (c) 2017, Yannick Cote <yhcote@gmail.com> All rights reserved.
+// This software is licensed under a 3-clause BSD license. Please consult the
+// LICENSE file distributed with the sources of this project regarding your
+// rights to use or distribute this software.
+
+// Package sif implements data structures and routines to create
+// and access SIF files.
+//
+// Layout of a SIF file (example):
+//
+// .================================================.
+// | GLOBAL HEADER: Sifheader |
+// | - launch: "#!/usr/bin/env..." |
+// | - magic: "SIF_MAGIC" |
+// | - version: "1" |
+// | - arch: "4" |
+// | - uuid: b2659d4e-bd50-4ea5-bd17-eec5e54f918e |
+// | - ctime: 1504657553 |
+// | - mtime: 1504657653 |
+// | - ndescr: 3 |
+// | - descroff: 120 | --.
+// | - descrlen: 432 | |
+// | - dataoff: 4096 | |
+// | - datalen: 619362 | |
+// |------------------------------------------------| <-'
+// | DESCR[0]: Sifdeffile |
+// | - Sifcommon |
+// | - datatype: DATA_DEFFILE |
+// | - id: 1 |
+// | - groupid: 1 |
+// | - link: NONE |
+// | - fileoff: 4096 | --.
+// | - filelen: 222 | |
+// |------------------------------------------------| <-----.
+// | DESCR[1]: Sifpartition | | |
+// | - Sifcommon | | |
+// | - datatype: DATA_PARTITION | | |
+// | - id: 2 | | |
+// | - groupid: 1 | | |
+// | - link: NONE | | |
+// | - fileoff: 4318 | ----. |
+// | - filelen: 618496 | | | |
+// | - fstype: Squashfs | | | |
+// | - parttype: System | | | |
+// | - content: Linux | | | |
+// |------------------------------------------------| | | |
+// | DESCR[2]: Sifsignature | | | |
+// | - Sifcommon | | | |
+// | - datatype: DATA_SIGNATURE | | | |
+// | - id: 3 | | | |
+// | - groupid: NONE | | | |
+// | - link: 2 | ------'
+// | - fileoff: 622814 | ------.
+// | - filelen: 644 | | | |
+// | - hashtype: SHA384 | | | |
+// | - entity: @ | | | |
+// |------------------------------------------------| <-' | |
+// | Definition file data | | |
+// | . | | |
+// | . | | |
+// | . | | |
+// |------------------------------------------------| <---' |
+// | File system partition image | |
+// | . | |
+// | . | |
+// | . | |
+// |------------------------------------------------| <-----'
+// | Signed verification data |
+// | . |
+// | . |
+// | . |
+// `================================================'
+//
+package sif
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/google/uuid"
+)
+
+// SIF header constants and quantities.
+const (
+ hdrLaunchLen = 32 // len("#!/usr/bin/env... ")
+ hdrMagicLen = 10 // len("SIF_MAGIC")
+ hdrVersionLen = 3 // len("99")
+)
+
+var hdrMagic = [...]byte{'S', 'I', 'F', '_', 'M', 'A', 'G', 'I', 'C', '\x00'}
+
+// SpecVersion specifies a SIF specification version.
+type SpecVersion uint8
+
+func (v SpecVersion) String() string { return fmt.Sprintf("%02d", v) }
+
+// bytes returns the value of v, formatted for direct inclusion in a SIF header.
+func (v SpecVersion) bytes() [hdrVersionLen]byte {
+	var b [hdrVersionLen]byte
+ copy(b[:], fmt.Sprintf("%02d", v))
+ return b
+}
+
+// SIF specification versions.
+const (
+ version01 SpecVersion = iota + 1
+)
+
+// CurrentVersion specifies the current SIF specification version.
+const CurrentVersion = version01
+
+const (
+ descrGroupMask = 0xf0000000 // groups start at that offset
+ descrEntityLen = 256 // len("Joe Bloe <jbloe@gmail.com>...")
+ descrNameLen = 128 // descriptor name (string identifier)
+ descrMaxPrivLen = 384 // size reserved for descriptor specific data
+)
+
+// DataType represents the different SIF data object types stored in the image.
+type DataType int32
+
+// List of supported SIF data types.
+const (
+ DataDeffile DataType = iota + 0x4001 // definition file data object
+ DataEnvVar // environment variables data object
+ DataLabels // JSON labels data object
+ DataPartition // file system data object
+ DataSignature // signing/verification data object
+ DataGenericJSON // generic JSON meta-data
+ DataGeneric // generic / raw data
+ DataCryptoMessage // cryptographic message data object
+)
+
+// String returns a human-readable representation of t.
+func (t DataType) String() string {
+ switch t {
+ case DataDeffile:
+ return "Def.FILE"
+ case DataEnvVar:
+ return "Env.Vars"
+ case DataLabels:
+ return "JSON.Labels"
+ case DataPartition:
+ return "FS"
+ case DataSignature:
+ return "Signature"
+ case DataGenericJSON:
+ return "JSON.Generic"
+ case DataGeneric:
+ return "Generic/Raw"
+ case DataCryptoMessage:
+ return "Cryptographic Message"
+ }
+ return "Unknown"
+}
+
+// FSType represents the different SIF file system types found in partition data objects.
+type FSType int32
+
+// List of supported file systems.
+const (
+ FsSquash FSType = iota + 1 // Squashfs file system, RDONLY
+ FsExt3 // EXT3 file system, RDWR (deprecated)
+ FsImmuObj // immutable data object archive
+ FsRaw // raw data
+ FsEncryptedSquashfs // Encrypted Squashfs file system, RDONLY
+)
+
+// String returns a human-readable representation of t.
+func (t FSType) String() string {
+ switch t {
+ case FsSquash:
+ return "Squashfs"
+ case FsExt3:
+ return "Ext3"
+ case FsImmuObj:
+ return "Archive"
+ case FsRaw:
+ return "Raw"
+ case FsEncryptedSquashfs:
+ return "Encrypted squashfs"
+ }
+ return "Unknown"
+}
+
+// PartType represents the different SIF container partition types (system and data).
+type PartType int32
+
+// List of supported partition types.
+const (
+ PartSystem PartType = iota + 1 // partition hosts an operating system
+ PartPrimSys // partition hosts the primary operating system
+ PartData // partition hosts data only
+ PartOverlay // partition hosts an overlay
+)
+
+// String returns a human-readable representation of t.
+func (t PartType) String() string {
+ switch t {
+ case PartSystem:
+ return "System"
+ case PartPrimSys:
+ return "*System"
+ case PartData:
+ return "Data"
+ case PartOverlay:
+ return "Overlay"
+ }
+ return "Unknown"
+}
+
+// hashType represents the different SIF hashing function types used to fingerprint data objects.
+type hashType int32
+
+// List of supported hash functions.
+const (
+ hashSHA256 hashType = iota + 1
+ hashSHA384
+ hashSHA512
+ hashBLAKE2S
+ hashBLAKE2B
+)
+
+// FormatType represents the different formats used to store cryptographic message objects.
+type FormatType int32
+
+// List of supported cryptographic message formats.
+const (
+ FormatOpenPGP FormatType = iota + 1
+ FormatPEM
+)
+
+// String returns a human-readable representation of t.
+func (t FormatType) String() string {
+ switch t {
+ case FormatOpenPGP:
+ return "OpenPGP"
+ case FormatPEM:
+ return "PEM"
+ }
+ return "Unknown"
+}
+
+// MessageType represents the different messages stored within cryptographic message objects.
+type MessageType int32
+
+// List of supported cryptographic message types.
+const (
+ // openPGP formatted messages.
+ MessageClearSignature MessageType = 0x100
+
+ // PEM formatted messages.
+ MessageRSAOAEP MessageType = 0x200
+)
+
+// String returns a human-readable representation of t.
+func (t MessageType) String() string {
+ switch t {
+ case MessageClearSignature:
+ return "Clear Signature"
+ case MessageRSAOAEP:
+ return "RSA-OAEP"
+ }
+ return "Unknown"
+}
+
+// header describes a loaded SIF file.
+type header struct {
+ LaunchScript [hdrLaunchLen]byte
+
+ Magic [hdrMagicLen]byte
+ Version [hdrVersionLen]byte
+ Arch archType
+ ID uuid.UUID
+
+ CreatedAt int64
+ ModifiedAt int64
+
+ DescriptorsFree int64
+ DescriptorsTotal int64
+ DescriptorsOffset int64
+ DescriptorsSize int64
+ DataOffset int64
+ DataSize int64
+}
+
+// GetIntegrityReader returns an io.Reader that reads the integrity-protected fields from h.
+func (h header) GetIntegrityReader() io.Reader {
+ return io.MultiReader(
+ bytes.NewReader(h.LaunchScript[:]),
+ bytes.NewReader(h.Magic[:]),
+ bytes.NewReader(h.Version[:]),
+ bytes.NewReader(h.ID[:]),
+ )
+}
+
+// ReadWriter describes the interface required to read and write SIF images.
+type ReadWriter interface {
+ io.ReaderAt
+ io.WriteSeeker
+ Truncate(int64) error
+}
+
+// FileImage describes the representation of a SIF file in memory.
+type FileImage struct {
+ rw ReadWriter // Backing storage for image.
+
+ h header // Raw global header from image.
+ rds []rawDescriptor // Raw descriptors from image.
+
+ closeOnUnload bool // Close rw on Unload.
+ minIDs map[uint32]uint32 // Minimum object IDs for each group ID.
+}
+
+// LaunchScript returns the image launch script.
+func (f *FileImage) LaunchScript() string {
+ return string(bytes.TrimRight(f.h.LaunchScript[:], "\x00"))
+}
+
+// Version returns the SIF specification version of the image.
+func (f *FileImage) Version() string {
+ return string(bytes.TrimRight(f.h.Version[:], "\x00"))
+}
+
+// PrimaryArch returns the primary CPU architecture of the image, or "unknown" if the primary CPU
+// architecture cannot be determined.
+func (f *FileImage) PrimaryArch() string { return f.h.Arch.GoArch() }
+
+// ID returns the ID of the image.
+func (f *FileImage) ID() string { return f.h.ID.String() }
+
+// CreatedAt returns the creation time of the image.
+func (f *FileImage) CreatedAt() time.Time { return time.Unix(f.h.CreatedAt, 0) }
+
+// ModifiedAt returns the last modification time of the image.
+func (f *FileImage) ModifiedAt() time.Time { return time.Unix(f.h.ModifiedAt, 0) }
+
+// DescriptorsFree returns the number of free descriptors in the image.
+func (f *FileImage) DescriptorsFree() int64 { return f.h.DescriptorsFree }
+
+// DescriptorsTotal returns the total number of descriptors in the image.
+func (f *FileImage) DescriptorsTotal() int64 { return f.h.DescriptorsTotal }
+
+// DescriptorsOffset returns the offset (in bytes) of the descriptors section in the image.
+func (f *FileImage) DescriptorsOffset() int64 { return f.h.DescriptorsOffset }
+
+// DescriptorsSize returns the size (in bytes) of the descriptors section in the image.
+func (f *FileImage) DescriptorsSize() int64 { return f.h.DescriptorsSize }
+
+// DataOffset returns the offset (in bytes) of the data section in the image.
+func (f *FileImage) DataOffset() int64 { return f.h.DataOffset }
+
+// DataSize returns the size (in bytes) of the data section in the image.
+func (f *FileImage) DataSize() int64 { return f.h.DataSize }
+
+// GetHeaderIntegrityReader returns an io.Reader that reads the integrity-protected fields from the
+// header of the image.
+func (f *FileImage) GetHeaderIntegrityReader() io.Reader {
+ return f.h.GetIntegrityReader()
+}
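
Continuing the illustrative sketches, the accessors above make the global header easy to dump. printHeader and the package name are hypothetical; f is again a loaded *sif.FileImage.

package sifexample

import (
	"fmt"

	"github.com/sylabs/sif/v2/pkg/sif"
)

// printHeader prints the global header fields exposed by FileImage.
func printHeader(f *sif.FileImage) {
	fmt.Println("version: ", f.Version())
	fmt.Println("arch:    ", f.PrimaryArch())
	fmt.Println("id:      ", f.ID())
	fmt.Println("created: ", f.CreatedAt().UTC())
	fmt.Println("modified:", f.ModifiedAt().UTC())
	fmt.Printf("objects:  %d in use (%d total)\n",
		f.DescriptorsTotal()-f.DescriptorsFree(), f.DescriptorsTotal())
	fmt.Printf("data:     %d bytes at offset %d\n", f.DataSize(), f.DataOffset())
}
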
diff --git a/vendor/github.com/vbauerster/mpb/v7/bar.go b/vendor/github.com/vbauerster/mpb/v7/bar.go
index 35644a411..646cb471a 100644
--- a/vendor/github.com/vbauerster/mpb/v7/bar.go
+++ b/vendor/github.com/vbauerster/mpb/v7/bar.go
@@ -5,7 +5,6 @@ import (
"context"
"fmt"
"io"
- "log"
"runtime/debug"
"strings"
"sync"
@@ -36,7 +35,6 @@ type Bar struct {
cacheState *bState
container *Progress
- dlogger *log.Logger
recoveredPanic interface{}
}
@@ -64,7 +62,7 @@ type bState struct {
averageDecorators []decor.AverageDecorator
ewmaDecorators []decor.EwmaDecorator
shutdownListeners []decor.ShutdownListener
- bufP, bufB, bufA *bytes.Buffer
+ buffers [3]*bytes.Buffer
filler BarFiller
middleware func(BarFiller) BarFiller
extender extenderFunc
@@ -81,7 +79,6 @@ type frame struct {
}
func newBar(container *Progress, bs *bState) *Bar {
- logPrefix := fmt.Sprintf("%sbar#%02d ", container.dlogger.Prefix(), bs.id)
ctx, cancel := context.WithCancel(container.ctx)
bar := &Bar{
@@ -93,7 +90,6 @@ func newBar(container *Progress, bs *bState) *Bar {
frameCh: make(chan *frame, 1),
done: make(chan struct{}),
cancel: cancel,
- dlogger: log.New(bs.debugOut, logPrefix, log.Lshortfile),
}
go bar.serve(ctx, bs)
@@ -106,7 +102,7 @@ func (b *Bar) ProxyReader(r io.Reader) io.ReadCloser {
if r == nil {
panic("expected non nil io.Reader")
}
- return newProxyReader(r, b)
+ return b.newProxyReader(r)
}
// ID returns the id of the bar.
@@ -279,7 +275,7 @@ func (b *Bar) Abort(drop bool) {
done := make(chan struct{})
select {
case b.operateState <- func(s *bState) {
- if s.completed == true {
+ if s.completed {
close(done)
return
}
@@ -346,13 +342,16 @@ func (b *Bar) render(tw int) {
// recovering if user defined decorator panics for example
if p := recover(); p != nil {
if b.recoveredPanic == nil {
+ if s.debugOut != nil {
+ fmt.Fprintln(s.debugOut, p)
+ _, _ = s.debugOut.Write(debug.Stack())
+ }
s.extender = makePanicExtender(p)
b.toShutdown = !b.toShutdown
b.recoveredPanic = p
}
reader, lines := s.extender(nil, s.reqWidth, stat)
b.frameCh <- &frame{reader, lines + 1}
- b.dlogger.Println(p)
}
s.completeFlushed = s.completed
}()
@@ -429,40 +428,41 @@ func (b *Bar) wSyncTable() [][]chan int {
}
func (s *bState) draw(stat decor.Statistics) io.Reader {
+ bufP, bufB, bufA := s.buffers[0], s.buffers[1], s.buffers[2]
nlr := strings.NewReader("\n")
tw := stat.AvailableWidth
for _, d := range s.pDecorators {
str := d.Decor(stat)
stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str))
- s.bufP.WriteString(str)
+ bufP.WriteString(str)
}
if stat.AvailableWidth < 1 {
- trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(s.bufP.String()), tw, "…"))
- s.bufP.Reset()
+ trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(bufP.String()), tw, "…"))
+ bufP.Reset()
return io.MultiReader(trunc, nlr)
}
if !s.trimSpace && stat.AvailableWidth > 1 {
stat.AvailableWidth -= 2
- s.bufB.WriteByte(' ')
- defer s.bufB.WriteByte(' ')
+ bufB.WriteByte(' ')
+ defer bufB.WriteByte(' ')
}
tw = stat.AvailableWidth
for _, d := range s.aDecorators {
str := d.Decor(stat)
stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str))
- s.bufA.WriteString(str)
+ bufA.WriteString(str)
}
if stat.AvailableWidth < 1 {
- trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(s.bufA.String()), tw, "…"))
- s.bufA.Reset()
- return io.MultiReader(s.bufP, s.bufB, trunc, nlr)
+ trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(bufA.String()), tw, "…"))
+ bufA.Reset()
+ return io.MultiReader(bufP, bufB, trunc, nlr)
}
- s.filler.Fill(s.bufB, s.reqWidth, stat)
+ s.filler.Fill(bufB, s.reqWidth, stat)
- return io.MultiReader(s.bufP, s.bufB, s.bufA, nlr)
+ return io.MultiReader(bufP, bufB, bufA, nlr)
}
func (s *bState) wSyncTable() [][]chan int {
@@ -489,39 +489,51 @@ func (s *bState) wSyncTable() [][]chan int {
func (s bState) decoratorEwmaUpdate(dur time.Duration) {
wg := new(sync.WaitGroup)
- wg.Add(len(s.ewmaDecorators))
- for _, d := range s.ewmaDecorators {
- d := d
- go func() {
+ for i := 0; i < len(s.ewmaDecorators); i++ {
+ switch d := s.ewmaDecorators[i]; i {
+ case len(s.ewmaDecorators) - 1:
d.EwmaUpdate(s.lastIncrement, dur)
- wg.Done()
- }()
+ default:
+ wg.Add(1)
+ go func() {
+ d.EwmaUpdate(s.lastIncrement, dur)
+ wg.Done()
+ }()
+ }
}
wg.Wait()
}
func (s bState) decoratorAverageAdjust(start time.Time) {
wg := new(sync.WaitGroup)
- wg.Add(len(s.averageDecorators))
- for _, d := range s.averageDecorators {
- d := d
- go func() {
+ for i := 0; i < len(s.averageDecorators); i++ {
+ switch d := s.averageDecorators[i]; i {
+ case len(s.averageDecorators) - 1:
d.AverageAdjust(start)
- wg.Done()
- }()
+ default:
+ wg.Add(1)
+ go func() {
+ d.AverageAdjust(start)
+ wg.Done()
+ }()
+ }
}
wg.Wait()
}
func (s bState) decoratorShutdownNotify() {
wg := new(sync.WaitGroup)
- wg.Add(len(s.shutdownListeners))
- for _, d := range s.shutdownListeners {
- d := d
- go func() {
+ for i := 0; i < len(s.shutdownListeners); i++ {
+ switch d := s.shutdownListeners[i]; i {
+ case len(s.shutdownListeners) - 1:
d.Shutdown()
- wg.Done()
- }()
+ default:
+ wg.Add(1)
+ go func() {
+ d.Shutdown()
+ wg.Done()
+ }()
+ }
}
wg.Wait()
}
@@ -547,14 +559,11 @@ func extractBaseDecorator(d decor.Decorator) decor.Decorator {
func makePanicExtender(p interface{}) extenderFunc {
pstr := fmt.Sprint(p)
- stack := debug.Stack()
- stackLines := bytes.Count(stack, []byte("\n"))
return func(_ io.Reader, _ int, st decor.Statistics) (io.Reader, int) {
mr := io.MultiReader(
strings.NewReader(runewidth.Truncate(pstr, st.AvailableWidth, "…")),
- strings.NewReader(fmt.Sprintf("\n%#v\n", st)),
- bytes.NewReader(stack),
+ strings.NewReader("\n"),
)
- return mr, stackLines + 1
+ return mr, 0
}
}
diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go
index 80b210455..54b7bfd6f 100644
--- a/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go
+++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go
@@ -32,13 +32,13 @@ type BarStyleComposer interface {
}
type bFiller struct {
+ rev bool
components [components]*component
tip struct {
count uint
onComplete *component
frames []*component
}
- flush func(dst io.Writer, filling, padding [][]byte)
}
type component struct {
@@ -113,14 +113,7 @@ func (s *barStyle) Reverse() BarStyleComposer {
}
func (s *barStyle) Build() BarFiller {
- bf := new(bFiller)
- if s.rev {
- bf.flush = func(dst io.Writer, filling, padding [][]byte) {
- flush(dst, padding, filling)
- }
- } else {
- bf.flush = flush
- }
+ bf := &bFiller{rev: s.rev}
bf.components[iLbound] = &component{
width: runewidth.StringWidth(stripansi.Strip(s.lbound)),
bytes: []byte(s.lbound),
@@ -164,8 +157,9 @@ func (s *bFiller) Fill(w io.Writer, width int, stat decor.Statistics) {
return
}
- w.Write(s.components[iLbound].bytes)
- defer w.Write(s.components[iRbound].bytes)
+ ow := optimisticWriter(w)
+ ow(s.components[iLbound].bytes)
+ defer ow(s.components[iRbound].bytes)
if width == 0 {
return
@@ -236,14 +230,27 @@ func (s *bFiller) Fill(w io.Writer, width int, stat decor.Statistics) {
}
}
- s.flush(w, filling, padding)
+ if s.rev {
+ flush(ow, padding, filling)
+ } else {
+ flush(ow, filling, padding)
+ }
}
-func flush(dst io.Writer, filling, padding [][]byte) {
+func flush(ow func([]byte), filling, padding [][]byte) {
for i := len(filling) - 1; i >= 0; i-- {
- dst.Write(filling[i])
+ ow(filling[i])
}
for i := 0; i < len(padding); i++ {
- dst.Write(padding[i])
+ ow(padding[i])
+ }
+}
+
+func optimisticWriter(w io.Writer) func([]byte) {
+ return func(p []byte) {
+ _, err := w.Write(p)
+ if err != nil {
+ panic(err)
+ }
}
}
diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go b/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go
index 58ae1c532..d38525efc 100644
--- a/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go
+++ b/vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go
@@ -73,15 +73,19 @@ func (s *sFiller) Fill(w io.Writer, width int, stat decor.Statistics) {
return
}
+ var err error
rest := width - frameWidth
switch s.position {
case positionLeft:
- io.WriteString(w, frame+strings.Repeat(" ", rest))
+ _, err = io.WriteString(w, frame+strings.Repeat(" ", rest))
case positionRight:
- io.WriteString(w, strings.Repeat(" ", rest)+frame)
+ _, err = io.WriteString(w, strings.Repeat(" ", rest)+frame)
default:
str := strings.Repeat(" ", rest/2) + frame + strings.Repeat(" ", rest/2+rest%2)
- io.WriteString(w, str)
+ _, err = io.WriteString(w, str)
+ }
+ if err != nil {
+ panic(err)
}
s.count++
}
diff --git a/vendor/github.com/vbauerster/mpb/v7/bar_option.go b/vendor/github.com/vbauerster/mpb/v7/bar_option.go
index 660e7c487..4ba490505 100644
--- a/vendor/github.com/vbauerster/mpb/v7/bar_option.go
+++ b/vendor/github.com/vbauerster/mpb/v7/bar_option.go
@@ -89,7 +89,10 @@ func BarFillerOnComplete(message string) BarOption {
return BarFillerMiddleware(func(base BarFiller) BarFiller {
return BarFillerFunc(func(w io.Writer, reqWidth int, st decor.Statistics) {
if st.Completed {
- io.WriteString(w, message)
+ _, err := io.WriteString(w, message)
+ if err != nil {
+ panic(err)
+ }
} else {
base.Fill(w, reqWidth, st)
}
diff --git a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go
index 925c8b1dc..eaf541cb7 100644
--- a/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go
+++ b/vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go
@@ -76,9 +76,9 @@ func (w *Writer) GetWidth() (int, error) {
return tw, err
}
-func (w *Writer) ansiCuuAndEd() (err error) {
+func (w *Writer) ansiCuuAndEd() error {
buf := make([]byte, 8)
buf = strconv.AppendInt(buf[:copy(buf, escOpen)], int64(w.lines), 10)
- _, err = w.out.Write(append(buf, cuuAndEd...))
- return
+ _, err := w.out.Write(append(buf, cuuAndEd...))
+ return err
}
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go b/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go
new file mode 100644
index 000000000..ea9fda79d
--- /dev/null
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go
@@ -0,0 +1,12 @@
+package decor
+
+import "io"
+
+func optimisticStringWriter(w io.Writer) func(string) {
+ return func(s string) {
+ _, err := io.WriteString(w, s)
+ if err != nil {
+ panic(err)
+ }
+ }
+}
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go b/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go
index 2b0a7a956..6e7f5c6ed 100644
--- a/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/percentage.go
@@ -2,7 +2,6 @@ package decor
import (
"fmt"
- "io"
"strconv"
"github.com/vbauerster/mpb/v7/internal"
@@ -24,12 +23,12 @@ func (s percentageType) Format(st fmt.State, verb rune) {
}
}
- io.WriteString(st, strconv.FormatFloat(float64(s), 'f', prec, 64))
-
+ osw := optimisticStringWriter(st)
+ osw(strconv.FormatFloat(float64(s), 'f', prec, 64))
if st.Flag(' ') {
- io.WriteString(st, " ")
+ osw(" ")
}
- io.WriteString(st, "%")
+ osw("%")
}
// Percentage returns percentage decorator. It's a wrapper of NewPercentage.
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go b/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go
index e4b974058..12879b8f1 100644
--- a/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/size_type.go
@@ -2,8 +2,6 @@ package decor
import (
"fmt"
- "io"
- "math"
"strconv"
)
@@ -47,16 +45,16 @@ func (self SizeB1024) Format(st fmt.State, verb rune) {
unit = _iMiB
case self < _iTiB:
unit = _iGiB
- case self <= math.MaxInt64:
+ default:
unit = _iTiB
}
- io.WriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64))
-
+ osw := optimisticStringWriter(st)
+ osw(strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64))
if st.Flag(' ') {
- io.WriteString(st, " ")
+ osw(" ")
}
- io.WriteString(st, unit.String())
+ osw(unit.String())
}
const (
@@ -96,14 +94,14 @@ func (self SizeB1000) Format(st fmt.State, verb rune) {
unit = _MB
case self < _TB:
unit = _GB
- case self <= math.MaxInt64:
+ default:
unit = _TB
}
- io.WriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64))
-
+ osw := optimisticStringWriter(st)
+ osw(strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64))
if st.Flag(' ') {
- io.WriteString(st, " ")
+ osw(" ")
}
- io.WriteString(st, unit.String())
+ osw(unit.String())
}
diff --git a/vendor/github.com/vbauerster/mpb/v7/decor/speed.go b/vendor/github.com/vbauerster/mpb/v7/decor/speed.go
index 634edabfd..99cfde2bf 100644
--- a/vendor/github.com/vbauerster/mpb/v7/decor/speed.go
+++ b/vendor/github.com/vbauerster/mpb/v7/decor/speed.go
@@ -2,7 +2,6 @@ package decor
import (
"fmt"
- "io"
"math"
"time"
@@ -24,7 +23,7 @@ type speedFormatter struct {
func (self *speedFormatter) Format(st fmt.State, verb rune) {
self.Formatter.Format(st, verb)
- io.WriteString(st, "/s")
+ optimisticStringWriter(st)("/s")
}
// EwmaSpeed exponential-weighted-moving-average based speed decorator.
diff --git a/vendor/github.com/vbauerster/mpb/v7/go.mod b/vendor/github.com/vbauerster/mpb/v7/go.mod
index 19a6f8044..8fa790dc7 100644
--- a/vendor/github.com/vbauerster/mpb/v7/go.mod
+++ b/vendor/github.com/vbauerster/mpb/v7/go.mod
@@ -4,7 +4,7 @@ require (
github.com/VividCortex/ewma v1.2.0
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
github.com/mattn/go-runewidth v0.0.13
- golang.org/x/sys v0.0.0-20211214234402-4825e8c3871d
+ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
)
go 1.14
diff --git a/vendor/github.com/vbauerster/mpb/v7/go.sum b/vendor/github.com/vbauerster/mpb/v7/go.sum
index b4388d9ea..aebe4d9d2 100644
--- a/vendor/github.com/vbauerster/mpb/v7/go.sum
+++ b/vendor/github.com/vbauerster/mpb/v7/go.sum
@@ -6,5 +6,5 @@ github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-golang.org/x/sys v0.0.0-20211214234402-4825e8c3871d h1:1oIt9o40TWWI9FUaveVpUvBe13FNqBNVXy3ue2fcfkw=
-golang.org/x/sys v0.0.0-20211214234402-4825e8c3871d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/vendor/github.com/vbauerster/mpb/v7/progress.go b/vendor/github.com/vbauerster/mpb/v7/progress.go
index 46485f719..123af17cf 100644
--- a/vendor/github.com/vbauerster/mpb/v7/progress.go
+++ b/vendor/github.com/vbauerster/mpb/v7/progress.go
@@ -6,8 +6,6 @@ import (
"context"
"fmt"
"io"
- "io/ioutil"
- "log"
"math"
"os"
"sync"
@@ -33,7 +31,6 @@ type Progress struct {
done chan struct{}
refreshCh chan time.Time
once sync.Once
- dlogger *log.Logger
}
// pState holds bars in its priorityQueue. It gets passed to
@@ -75,7 +72,6 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress {
rr: prr,
parkedBars: make(map[*Bar]*Bar),
output: os.Stdout,
- debugOut: ioutil.Discard,
}
for _, opt := range options {
@@ -91,7 +87,6 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress {
bwg: new(sync.WaitGroup),
operateState: make(chan func(*pState)),
done: make(chan struct{}),
- dlogger: log.New(s.debugOut, "[mpb] ", log.Lshortfile),
}
p.cwg.Add(1)
@@ -234,12 +229,26 @@ func (p *Progress) serve(s *pState, cw *cwriter.Writer) {
op(s)
case <-p.refreshCh:
if err := s.render(cw); err != nil {
- p.dlogger.Println(err)
+ if s.debugOut != nil {
+ _, e := fmt.Fprintln(s.debugOut, err)
+ if e != nil {
+ panic(err)
+ }
+ } else {
+ panic(err)
+ }
}
case <-s.shutdownNotifier:
for s.heapUpdated {
if err := s.render(cw); err != nil {
- p.dlogger.Println(err)
+ if s.debugOut != nil {
+ _, e := fmt.Fprintln(s.debugOut, err)
+ if e != nil {
+ panic(err)
+ }
+ } else {
+ panic(err)
+ }
}
}
return
@@ -311,7 +320,10 @@ func (s *pState) flush(cw *cwriter.Writer) error {
for s.bHeap.Len() > 0 {
b := heap.Pop(&s.bHeap).(*Bar)
frame := <-b.frameCh
- cw.ReadFrom(frame.reader)
+ _, err := cw.ReadFrom(frame.reader)
+ if err != nil {
+ return err
+ }
if b.toShutdown {
if b.recoveredPanic != nil {
s.barShutdownQueue = append(s.barShutdownQueue, b)
@@ -402,9 +414,9 @@ func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOptio
bs.priority = -(math.MaxInt32 - s.idCount)
}
- bs.bufP = bytes.NewBuffer(make([]byte, 0, 128))
- bs.bufB = bytes.NewBuffer(make([]byte, 0, 256))
- bs.bufA = bytes.NewBuffer(make([]byte, 0, 128))
+ for i := 0; i < len(bs.buffers); i++ {
+ bs.buffers[i] = bytes.NewBuffer(make([]byte, 0, 512))
+ }
return bs
}
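
With the debug logger removed, render errors and recovered decorator panics are written to debugOut when one is set, and a render error otherwise causes a panic. A sketch of opting into the logging behavior, assuming mpb's existing WithDebugOutput container option (defined outside this diff); the log file name is hypothetical.

package main

import (
	"log"
	"os"

	"github.com/vbauerster/mpb/v7"
)

func main() {
	debugLog, err := os.Create("mpb-debug.log") // hypothetical destination
	if err != nil {
		log.Fatal(err)
	}
	defer debugLog.Close()

	// With a debug writer set, render errors are logged here rather than
	// panicking, and recovered decorator panics are recorded as well.
	p := mpb.New(mpb.WithDebugOutput(debugLog))
	// ... add bars and perform work here ...
	p.Wait()
}
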
diff --git a/vendor/github.com/vbauerster/mpb/v7/proxyreader.go b/vendor/github.com/vbauerster/mpb/v7/proxyreader.go
index a16f5ec8a..25f195bb8 100644
--- a/vendor/github.com/vbauerster/mpb/v7/proxyreader.go
+++ b/vendor/github.com/vbauerster/mpb/v7/proxyreader.go
@@ -11,7 +11,7 @@ type proxyReader struct {
bar *Bar
}
-func (x *proxyReader) Read(p []byte) (int, error) {
+func (x proxyReader) Read(p []byte) (int, error) {
n, err := x.ReadCloser.Read(p)
x.bar.IncrBy(n)
if err == io.EOF {
@@ -21,12 +21,11 @@ func (x *proxyReader) Read(p []byte) (int, error) {
}
type proxyWriterTo struct {
- io.ReadCloser // *proxyReader
- wt io.WriterTo
- bar *Bar
+ proxyReader
+ wt io.WriterTo
}
-func (x *proxyWriterTo) WriteTo(w io.Writer) (int64, error) {
+func (x proxyWriterTo) WriteTo(w io.Writer) (int64, error) {
n, err := x.wt.WriteTo(w)
x.bar.IncrInt64(n)
if err == io.EOF {
@@ -36,13 +35,12 @@ func (x *proxyWriterTo) WriteTo(w io.Writer) (int64, error) {
}
type ewmaProxyReader struct {
- io.ReadCloser // *proxyReader
- bar *Bar
+ proxyReader
}
-func (x *ewmaProxyReader) Read(p []byte) (int, error) {
+func (x ewmaProxyReader) Read(p []byte) (int, error) {
start := time.Now()
- n, err := x.ReadCloser.Read(p)
+ n, err := x.proxyReader.Read(p)
if n > 0 {
x.bar.DecoratorEwmaUpdate(time.Since(start))
}
@@ -50,12 +48,11 @@ func (x *ewmaProxyReader) Read(p []byte) (int, error) {
}
type ewmaProxyWriterTo struct {
- io.ReadCloser // *ewmaProxyReader
- wt io.WriterTo // *proxyWriterTo
- bar *Bar
+ ewmaProxyReader
+ wt proxyWriterTo
}
-func (x *ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) {
+func (x ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) {
start := time.Now()
n, err := x.wt.WriteTo(w)
if n > 0 {
@@ -64,17 +61,19 @@ func (x *ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) {
return n, err
}
-func newProxyReader(r io.Reader, bar *Bar) io.ReadCloser {
- rc := toReadCloser(r)
- rc = &proxyReader{rc, bar}
-
- if wt, isWriterTo := r.(io.WriterTo); bar.hasEwmaDecorators {
- rc = &ewmaProxyReader{rc, bar}
- if isWriterTo {
- rc = &ewmaProxyWriterTo{rc, wt, bar}
+func (b *Bar) newProxyReader(r io.Reader) (rc io.ReadCloser) {
+ pr := proxyReader{toReadCloser(r), b}
+ if wt, ok := r.(io.WriterTo); ok {
+ pw := proxyWriterTo{pr, wt}
+ if b.hasEwmaDecorators {
+ rc = ewmaProxyWriterTo{ewmaProxyReader{pr}, pw}
+ } else {
+ rc = pw
}
- } else if isWriterTo {
- rc = &proxyWriterTo{rc, wt, bar}
+ } else if b.hasEwmaDecorators {
+ rc = ewmaProxyReader{pr}
+ } else {
+ rc = pr
}
return rc
}
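
Finally, a sketch exercising the refactored proxy reader chain through the public Bar.ProxyReader API: strings.Reader implements io.WriterTo, so io.Copy takes the proxyWriterTo path and the bar advances as bytes are copied. AddBar and New are existing mpb entry points not shown in this diff.

package main

import (
	"io"
	"log"
	"strings"

	"github.com/vbauerster/mpb/v7"
)

func main() {
	src := strings.NewReader(strings.Repeat("x", 1<<20))

	p := mpb.New()
	bar := p.AddBar(int64(src.Len()))

	// ProxyReader wraps src via the refactored newProxyReader; reads and
	// WriteTo calls through the proxy advance the bar.
	proxy := bar.ProxyReader(src)
	defer proxy.Close()

	if _, err := io.Copy(io.Discard, proxy); err != nil {
		log.Fatal(err)
	}
	p.Wait()
}
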