-rw-r--r--  cmd/podman/images/build.go | 22
-rw-r--r--  cmd/podman/secrets/inspect.go | 42
-rw-r--r--  docs/source/markdown/podman-build.1.md.in | 8
-rw-r--r--  docs/source/markdown/podman-secret-inspect.1.md | 4
-rw-r--r--  docs/source/markdown/podman-system-service.1.md | 2
-rw-r--r--  go.mod | 24
-rw-r--r--  go.sum | 110
-rwxr-xr-x  hack/libsubid_tag.sh | 2
-rw-r--r--  libpod/events.go | 2
-rw-r--r--  libpod/events/config.go | 2
-rw-r--r--  libpod/events/events.go | 8
-rw-r--r--  libpod/events/journal_linux.go | 6
-rw-r--r--  pkg/api/handlers/compat/images_build.go | 15
-rw-r--r--  pkg/bindings/images/build.go | 8
-rw-r--r--  pkg/domain/entities/events.go | 5
-rw-r--r--  pkg/domain/infra/abi/play.go | 22
-rw-r--r--  pkg/systemd/notifyproxy/notifyproxy.go | 98
-rw-r--r--  test/apiv2/27-containersEvents.at | 2
-rwxr-xr-x  test/buildah-bud/apply-podman-deltas | 3
-rw-r--r--  test/e2e/events_test.go | 12
-rw-r--r--  test/e2e/secret_test.go | 18
-rw-r--r--  test/system/070-build.bats | 6
-rw-r--r--  vendor/github.com/containers/buildah/add.go | 44
-rw-r--r--  vendor/github.com/containers/buildah/bind/mount.go | 30
-rw-r--r--  vendor/github.com/containers/buildah/buildah.go | 6
-rw-r--r--  vendor/github.com/containers/buildah/chroot/pty_ptmx.go | 10
-rw-r--r--  vendor/github.com/containers/buildah/chroot/run_common.go | 16
-rw-r--r--  vendor/github.com/containers/buildah/chroot/run_freebsd.go | 22
-rw-r--r--  vendor/github.com/containers/buildah/chroot/run_linux.go | 84
-rw-r--r--  vendor/github.com/containers/buildah/chroot/seccomp.go | 18
-rw-r--r--  vendor/github.com/containers/buildah/chroot/selinux.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/commit.go | 30
-rw-r--r--  vendor/github.com/containers/buildah/config.go | 14
-rw-r--r--  vendor/github.com/containers/buildah/copier/copier.go | 66
-rw-r--r--  vendor/github.com/containers/buildah/copier/syscall_unix.go | 6
-rw-r--r--  vendor/github.com/containers/buildah/copier/xattrs.go | 6
-rw-r--r--  vendor/github.com/containers/buildah/define/build.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/define/types.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/delete.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/digester.go | 10
-rw-r--r--  vendor/github.com/containers/buildah/image.go | 42
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/build.go | 34
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/executor.go | 29
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/stage_executor.go | 88
-rw-r--r--  vendor/github.com/containers/buildah/import.go | 18
-rw-r--r--  vendor/github.com/containers/buildah/install.md | 6
-rw-r--r--  vendor/github.com/containers/buildah/internal/parse/parse.go | 34
-rw-r--r--  vendor/github.com/containers/buildah/mount.go | 12
-rw-r--r--  vendor/github.com/containers/buildah/new.go | 18
-rw-r--r--  vendor/github.com/containers/buildah/pkg/chrootuser/user.go | 8
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cli/build.go | 49
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cli/common.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/pkg/parse/parse.go | 34
-rw-r--r--  vendor/github.com/containers/buildah/pkg/parse/parse_unix.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/push.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/run_common.go | 83
-rw-r--r--  vendor/github.com/containers/buildah/run_freebsd.go | 10
-rw-r--r--  vendor/github.com/containers/buildah/run_linux.go | 56
-rw-r--r--  vendor/github.com/containers/buildah/seccomp.go | 2
-rw-r--r--  vendor/github.com/containers/buildah/unmount.go | 4
-rw-r--r--  vendor/github.com/containers/buildah/util.go | 8
-rw-r--r--  vendor/github.com/containers/buildah/util/util.go | 12
-rw-r--r--  vendor/github.com/containers/common/libimage/image.go | 6
-rw-r--r--  vendor/github.com/containers/common/libnetwork/types/network.go | 2
-rw-r--r--  vendor/github.com/containers/common/pkg/config/config_linux.go | 4
-rw-r--r--  vendor/github.com/containers/common/pkg/config/default.go | 6
-rw-r--r--  vendor/github.com/containers/image/v5/pkg/docker/config/config.go | 15
-rw-r--r--  vendor/github.com/containers/storage/containers.go | 19
-rw-r--r--  vendor/github.com/containers/storage/drivers/aufs/aufs.go | 7
-rw-r--r--  vendor/github.com/containers/storage/drivers/aufs/dirs.go | 4
-rw-r--r--  vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go | 12
-rw-r--r--  vendor/github.com/containers/storage/drivers/btrfs/btrfs.go | 5
-rw-r--r--  vendor/github.com/containers/storage/drivers/btrfs/version_none.go | 3
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/device_setup.go | 7
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/deviceset.go | 21
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/driver.go | 4
-rw-r--r--  vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go | 3
-rw-r--r--  vendor/github.com/containers/storage/drivers/driver.go | 2
-rw-r--r--  vendor/github.com/containers/storage/drivers/fsdiff.go | 11
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/check.go | 13
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go | 3
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/jsoniter.go | 3
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go | 68
-rw-r--r--  vendor/github.com/containers/storage/drivers/quota/projectquota.go | 5
-rw-r--r--  vendor/github.com/containers/storage/drivers/windows/windows.go | 13
-rw-r--r--  vendor/github.com/containers/storage/drivers/zfs/zfs.go | 2
-rw-r--r--  vendor/github.com/containers/storage/images.go | 5
-rw-r--r--  vendor/github.com/containers/storage/layers.go | 12
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/archive.go | 9
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/changes.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/copy.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/diff.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/archive.go | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go | 4
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/cache_linux.go | 5
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go | 5
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/storage.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/chunked/storage_linux.go | 7
-rw-r--r--  vendor/github.com/containers/storage/pkg/directory/directory.go | 3
-rw-r--r--  vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go | 4
-rw-r--r--  vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go | 6
-rw-r--r--  vendor/github.com/containers/storage/pkg/idtools/idtools.go | 5
-rw-r--r--  vendor/github.com/containers/storage/pkg/ioutils/fswriters.go | 5
-rw-r--r--  vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go | 7
-rw-r--r--  vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go | 7
-rw-r--r--  vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go | 22
-rw-r--r--  vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go | 5
-rw-r--r--  vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go | 3
-rw-r--r--  vendor/github.com/containers/storage/storage.conf | 2
-rw-r--r--  vendor/github.com/containers/storage/store.go | 45
-rw-r--r--  vendor/github.com/containers/storage/types/utils.go | 3
-rw-r--r--  vendor/github.com/containers/storage/userns.go | 2
-rw-r--r--  vendor/github.com/containers/storage/utils.go | 4
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/compare.go | 64
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go | 44
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/value/zero.go | 48
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/options.go | 10
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/path.go | 20
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/report_compare.go | 10
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/report_reflect.go | 11
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/report_slices.go | 25
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/report_text.go | 1
-rw-r--r--  vendor/github.com/klauspost/compress/README.md | 6
-rw-r--r--  vendor/github.com/klauspost/compress/flate/deflate.go | 3
-rw-r--r--  vendor/github.com/klauspost/compress/flate/dict_decoder.go | 24
-rw-r--r--  vendor/github.com/klauspost/compress/flate/fast_encoder.go | 3
-rw-r--r--  vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go | 20
-rw-r--r--  vendor/github.com/klauspost/compress/flate/huffman_code.go | 15
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level5.go | 15
-rw-r--r--  vendor/github.com/klauspost/compress/flate/level6.go | 21
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress.go | 36
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_amd64.go | 4
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_amd64.s | 1
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_generic.go | 18
-rw-r--r--  vendor/github.com/klauspost/compress/internal/snapref/encode_other.go | 6
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/README.md | 2
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/blockdec.go | 3
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/bytebuf.go | 3
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/decoder.go | 21
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/decoder_options.go | 12
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_better.go | 23
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_dfast.go | 7
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_fast.go | 6
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/framedec.go | 23
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s | 1
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go | 10
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s | 1
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/.gitignore | 1
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/.travis.yml | 43
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/Vagrantfile | 34
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go | 17
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/utils_solaris.go | 17
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/.envrc | 4
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/.gitignore | 6
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/.golangci.yml | 207
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/.yamllint | 16
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/CHANGELOG.md | 250
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/CONTRIBUTING.md (renamed from vendor/github.com/mistifyio/go-zfs/CONTRIBUTING.md) | 44
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/LICENSE (renamed from vendor/github.com/mistifyio/go-zfs/LICENSE) | 0
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/Makefile | 19
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/README.md (renamed from vendor/github.com/mistifyio/go-zfs/README.md) | 13
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/Vagrantfile | 33
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/error.go (renamed from vendor/github.com/mistifyio/go-zfs/error.go) | 0
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/lint.mk | 75
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/rules.mk | 49
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/shell.nix | 26
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/utils.go (renamed from vendor/github.com/mistifyio/go-zfs/utils.go) | 90
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go | 19
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go | 19
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/zfs.go (renamed from vendor/github.com/mistifyio/go-zfs/zfs.go) | 165
-rw-r--r--  vendor/github.com/mistifyio/go-zfs/v3/zpool.go (renamed from vendor/github.com/mistifyio/go-zfs/zpool.go) | 37
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go | 1
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go | 4
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go | 14
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go | 4
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go | 1
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go | 1
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/signer.go | 4
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go | 4
-rw-r--r--  vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go | 4
-rw-r--r--  vendor/golang.org/x/crypto/chacha20/chacha_generic.go | 4
-rw-r--r--  vendor/golang.org/x/crypto/internal/alias/alias.go (renamed from vendor/golang.org/x/crypto/internal/subtle/aliasing.go) | 5
-rw-r--r--  vendor/golang.org/x/crypto/internal/alias/alias_purego.go (renamed from vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go) | 5
-rw-r--r--  vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go | 6
-rw-r--r--  vendor/golang.org/x/crypto/ssh/agent/client.go | 4
-rw-r--r--  vendor/golang.org/x/net/html/render.go | 4
-rw-r--r--  vendor/golang.org/x/net/html/token.go | 4
-rw-r--r--  vendor/golang.org/x/net/http2/frame.go | 22
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/encode.go | 2
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/hpack.go | 16
-rw-r--r--  vendor/golang.org/x/net/http2/http2.go | 8
-rw-r--r--  vendor/golang.org/x/net/http2/server.go | 9
-rw-r--r--  vendor/golang.org/x/net/http2/transport.go | 51
-rw-r--r--  vendor/modules.txt | 34
201 files changed, 2336 insertions, 1373 deletions
diff --git a/cmd/podman/images/build.go b/cmd/podman/images/build.go
index a4d6614e2..473048834 100644
--- a/cmd/podman/images/build.go
+++ b/cmd/podman/images/build.go
@@ -18,6 +18,7 @@ import (
"github.com/containers/common/pkg/completion"
"github.com/containers/common/pkg/config"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
encconfig "github.com/containers/ocicrypt/config"
enchelpers "github.com/containers/ocicrypt/helpers"
"github.com/containers/podman/v4/cmd/podman/common"
@@ -205,6 +206,24 @@ func build(cmd *cobra.Command, args []string) error {
return errors.New("'--output' option is not supported in remote mode")
}
+ if buildOpts.Network == "none" {
+ if cmd.Flag("dns").Changed {
+ return errors.New("the --dns option cannot be used with --network=none")
+ }
+ if cmd.Flag("dns-option").Changed {
+ return errors.New("the --dns-option option cannot be used with --network=none")
+ }
+ if cmd.Flag("dns-search").Changed {
+ return errors.New("the --dns-search option cannot be used with --network=none")
+ }
+ }
+
+ if cmd.Flag("network").Changed {
+ if buildOpts.Network != "host" && buildOpts.Isolation == buildahDefine.IsolationChroot.String() {
+ return fmt.Errorf("cannot set --network other than host with --isolation %s", buildOpts.Isolation)
+ }
+ }
+
// Extract container files from the CLI (i.e., --file/-f) first.
var containerFiles []string
for _, f := range buildOpts.File {
@@ -613,6 +632,9 @@ func buildFlagsWrapperToOptions(c *cobra.Command, contextDir string, flags *buil
timestamp := time.Unix(flags.Timestamp, 0).UTC()
opts.Timestamp = &timestamp
}
+ if c.Flag("skip-unused-stages").Changed {
+ opts.SkipUnusedStages = types.NewOptionalBool(flags.SkipUnusedStages)
+ }
return &entities.BuildOptions{BuildOptions: opts}, nil
}
diff --git a/cmd/podman/secrets/inspect.go b/cmd/podman/secrets/inspect.go
index f4c395b0f..9054fc3b0 100644
--- a/cmd/podman/secrets/inspect.go
+++ b/cmd/podman/secrets/inspect.go
@@ -25,7 +25,23 @@ var (
}
)
-var format string
+var (
+ format string
+ pretty bool
+)
+
+const (
+ prettyTemplate = `ID: {{.ID}}
+Name: {{.Spec.Name}}
+{{- if .Spec.Labels }}
+Labels:
+{{- range $k, $v := .Spec.Labels }}
+ - {{ $k }}{{if $v }}={{ $v }}{{ end }}
+{{- end }}{{ end }}
+Driver: {{.Spec.Driver.Name}}
+Created at: {{.CreatedAt}}
+Updated at: {{.UpdatedAt}}`
+)
func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
@@ -34,8 +50,11 @@ func init() {
})
flags := inspectCmd.Flags()
formatFlagName := "format"
- flags.StringVarP(&format, formatFlagName, "f", "", "Format volume output using Go template")
+ flags.StringVarP(&format, formatFlagName, "f", "", "Format inspect output using Go template")
_ = inspectCmd.RegisterFlagCompletionFunc(formatFlagName, common.AutocompleteFormat(&entities.SecretInfoReport{}))
+
+ prettyFlagName := "pretty"
+ flags.BoolVar(&pretty, prettyFlagName, false, "Print inspect output in human-readable format")
}
func inspect(cmd *cobra.Command, args []string) error {
@@ -46,7 +65,21 @@ func inspect(cmd *cobra.Command, args []string) error {
inspected = []*entities.SecretInfoReport{}
}
- if cmd.Flags().Changed("format") {
+ switch {
+ case cmd.Flags().Changed("pretty"):
+ rpt := report.New(os.Stdout, cmd.Name())
+ defer rpt.Flush()
+
+ rpt, err := rpt.Parse(report.OriginUser, prettyTemplate)
+ if err != nil {
+ return err
+ }
+
+ if err := rpt.Execute(inspected); err != nil {
+ return err
+ }
+
+ case cmd.Flags().Changed("format"):
rpt := report.New(os.Stdout, cmd.Name())
defer rpt.Flush()
@@ -58,7 +91,8 @@ func inspect(cmd *cobra.Command, args []string) error {
if err := rpt.Execute(inspected); err != nil {
return err
}
- } else {
+
+ default:
buf, err := json.MarshalIndent(inspected, "", " ")
if err != nil {
return err
diff --git a/docs/source/markdown/podman-build.1.md.in b/docs/source/markdown/podman-build.1.md.in
index e201806e5..e1ef13a0d 100644
--- a/docs/source/markdown/podman-build.1.md.in
+++ b/docs/source/markdown/podman-build.1.md.in
@@ -145,6 +145,10 @@ Limit the use of cached images to only consider images with created timestamps l
For example if `--cache-ttl=1h` is specified, Buildah will only consider intermediate cache images which are created
under the duration of one hour, and intermediate cache images outside this duration will be ignored.
+Note: Setting `--cache-ttl=0` manually is equivalent to using `--no-cache` in the
+implementation since this would effectively mean that user is not willing to use
+cache at all.
+
#### **--cap-add**=*CAP\_xxx*
When executing RUN instructions, run the command specified in the instruction
@@ -564,6 +568,10 @@ as a seccomp filter
Sign the image using a GPG key with the specified FINGERPRINT. (This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines,)
+#### **--skip-unused-stages**
+
+Skip stages in multi-stage builds which don't affect the target stage. (Default: **true**).
+
#### **--squash**
Squash all of the image's new layers into a single new layer; any preexisting
diff --git a/docs/source/markdown/podman-secret-inspect.1.md b/docs/source/markdown/podman-secret-inspect.1.md
index 77d9276bd..b5bcd2b92 100644
--- a/docs/source/markdown/podman-secret-inspect.1.md
+++ b/docs/source/markdown/podman-secret-inspect.1.md
@@ -34,6 +34,10 @@ Format secret output using Go template.
Print usage statement.
+#### **--pretty**
+
+Print inspect output in human-readable format
+
## EXAMPLES
diff --git a/docs/source/markdown/podman-system-service.1.md b/docs/source/markdown/podman-system-service.1.md
index 2ec48aeb4..2293dea0a 100644
--- a/docs/source/markdown/podman-system-service.1.md
+++ b/docs/source/markdown/podman-system-service.1.md
@@ -1,4 +1,4 @@
-% podman-service 1
+% podman-system-service 1
## NAME
podman\-system\-service - Run an API service
diff --git a/go.mod b/go.mod
index bbc7014a7..481397791 100644
--- a/go.mod
+++ b/go.mod
@@ -11,13 +11,13 @@ require (
github.com/container-orchestrated-devices/container-device-interface v0.5.1
github.com/containernetworking/cni v1.1.2
github.com/containernetworking/plugins v1.1.1
- github.com/containers/buildah v1.27.1-0.20220907121344-97a52b13bb27
- github.com/containers/common v0.49.2-0.20220909190843-e5685792b5d7
+ github.com/containers/buildah v1.27.1-0.20220921131114-d3064796af36
+ github.com/containers/common v0.49.2-0.20220920205255-8062f81c5497
github.com/containers/conmon v2.0.20+incompatible
- github.com/containers/image/v5 v5.22.1-0.20220907162003-651744379993
+ github.com/containers/image/v5 v5.22.1-0.20220919112403-fe51f7ffca50
github.com/containers/ocicrypt v1.1.5
github.com/containers/psgo v1.7.3
- github.com/containers/storage v1.42.1-0.20220911223137-e11b246de159
+ github.com/containers/storage v1.42.1-0.20220919112236-8a581aac3bdf
github.com/coreos/go-systemd/v22 v22.4.0
github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
github.com/cyphar/filepath-securejoin v0.2.3
@@ -93,15 +93,14 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
- github.com/google/go-cmp v0.5.8 // indirect
+ github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-containerregistry v0.11.0 // indirect
github.com/google/go-intervals v0.0.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
- github.com/honeycombio/libhoney-go v1.15.8 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jinzhu/copier v0.3.5 // indirect
- github.com/klauspost/compress v1.15.9 // indirect
+ github.com/klauspost/compress v1.15.10 // indirect
github.com/klauspost/pgzip v1.2.5 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/letsencrypt/boulder v0.0.0-20220723181115-27de4befb95e // indirect
@@ -109,7 +108,7 @@ require (
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
- github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect
+ github.com/mistifyio/go-zfs/v3 v3.0.0 // indirect
github.com/moby/sys/mount v0.3.3 // indirect
github.com/moby/sys/mountinfo v0.6.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -123,21 +122,22 @@ require (
github.com/rivo/uniseg v0.2.0 // indirect
github.com/rogpeppe/go-internal v1.8.0 // indirect
github.com/seccomp/libseccomp-golang v0.10.0 // indirect
- github.com/sigstore/sigstore v1.4.0 // indirect
+ github.com/sigstore/sigstore v1.4.1 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
github.com/sylabs/sif/v2 v2.7.2 // indirect
github.com/tchap/go-patricia v2.3.0+incompatible // indirect
- github.com/theupdateframework/go-tuf v0.4.0 // indirect
+ github.com/theupdateframework/go-tuf v0.5.0 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
github.com/vbatts/tar-split v0.11.2 // indirect
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect
+ github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
go.opencensus.io v0.23.0 // indirect
- golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect
- golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect
+ golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect
+ golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b // indirect
golang.org/x/tools v0.1.12 // indirect
google.golang.org/genproto v0.0.0-20220720214146-176da50484ac // indirect
google.golang.org/grpc v1.48.0 // indirect
diff --git a/go.sum b/go.sum
index 3815652b7..60355bc0c 100644
--- a/go.sum
+++ b/go.sum
@@ -86,7 +86,6 @@ github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSW
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
-github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
@@ -155,13 +154,11 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
-github.com/ProtonMail/go-crypto v0.0.0-20220517143526-88bb52951d5b/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
github.com/ProtonMail/go-crypto v0.0.0-20220824120805-4b6e5c587895/go.mod h1:UBYPn8k0D56RtnR8RFQMjmh4KrZzWJ5o7Z9SYjossQ8=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/ReneKroon/ttlcache/v2 v2.11.0/go.mod h1:mBxvsNY+BT8qLLd6CuAJubbKo6r0jh3nb5et22bbfGY=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
@@ -185,7 +182,6 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
-github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
@@ -196,32 +192,33 @@ github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZo
github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.44.44/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go v1.44.76/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
-github.com/aws/aws-sdk-go-v2 v1.16.5/go.mod h1:Wh7MEsmEApyL5hrWzpDkba4gwAPc5/piwLVLFnCxp48=
+github.com/aws/aws-sdk-go v1.44.96/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go-v2 v1.16.11/go.mod h1:WTACcleLz6VZTp7fak4EO5b9Q4foxbn+8PIz3PmyKlo=
-github.com/aws/aws-sdk-go-v2/config v1.15.11/go.mod h1:mD5tNFciV7YHNjPpFYqJ6KGpoSfY107oZULvTHIxtbI=
+github.com/aws/aws-sdk-go-v2 v1.16.14/go.mod h1:s/G+UV29dECbF5rf+RNj1xhlmvoNurGSr+McVSRj59w=
github.com/aws/aws-sdk-go-v2/config v1.17.0/go.mod h1:4SKzBMiB8lV0fw2w7eDBo/LjQyHFITN4vUUuqpurFmI=
-github.com/aws/aws-sdk-go-v2/credentials v1.12.6/go.mod h1:mQgnRmBPF2S/M01W4T4Obp3ZaZB6o1s/R8cOUda9vtI=
+github.com/aws/aws-sdk-go-v2/config v1.17.5/go.mod h1:H0cvPNDO3uExWts/9PDhD/0ne2esu1uaIulwn1vkwxM=
github.com/aws/aws-sdk-go-v2/credentials v1.12.13/go.mod h1:9fDEemXizwXrxPU1MTzv69LP/9D8HVl5qHAQO9A9ikY=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.6/go.mod h1:ClLMcuQA/wcHPmOIfNzNI4Y1Q0oDbmEkbYhMFOzHDh8=
+github.com/aws/aws-sdk-go-v2/credentials v1.12.18/go.mod h1:O7n/CPagQ33rfG6h7vR/W02ammuc5CrsSM22cNZp9so=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.12/go.mod h1:aZ4vZnyUuxedC7eD4JyEHpGnCz+O2sHQEx3VvAwklSE=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.12/go.mod h1:Afj/U8svX6sJ77Q+FPWMzabJ9QjbwP32YlopgKALUpg=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.15/go.mod h1:Oz2/qWINxIgSmoZT9adpxJy2UhpcOAI3TIyWgYMVSz0=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.18/go.mod h1:348MLhzV1GSlZSMusdwQpXKbhD7X2gbI/TxwAPKkYZQ=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.6/go.mod h1:FwpAKI+FBPIELJIdmQzlLtRe8LQSOreMcM2wBsPMvvc=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.21/go.mod h1:XsmHMV9c512xgsW01q7H0ut+UQQQpWX8QsFbdLHDwaU=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.12/go.mod h1:ckaCVTEdGAxO6KwTGzgskxR1xM+iJW4lxMyDFVda2Fc=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.3.13/go.mod h1:hiM/y1XPp3DoEPhoVEYc/CZcS58dP6RKJRDFp99wdX0=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.15/go.mod h1:kjJ4CyD9M3Wq88GYg3IPfj67Rs0Uvz8aXK7MJ8BvE4I=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.19/go.mod h1:cVHo8KTuHjShb9V8/VjH3S/8+xPu16qx8fdGwmotJhE=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.6/go.mod h1:DxAPjquoEHf3rUHh1b9+47RAaXB8/7cB6jkzCt/GOEI=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.22/go.mod h1:tltHVGy977LrSOgRR5aV9+miyno/Gul/uJNPKS7FzP4=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.12/go.mod h1:1TODGhheLWjpQWSuhYuAUWYTCKwEjx2iblIFKDHjeTc=
-github.com/aws/aws-sdk-go-v2/service/kms v1.17.3/go.mod h1:EKkrWWXwWYf8x3Nrm6Oix3zZP9NRBHqxw5buFGVBHA0=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.15/go.mod h1:ZVJ7ejRl4+tkWMuCwjXoy0jd8fF5u3RCyWjSVjUIvQE=
github.com/aws/aws-sdk-go-v2/service/kms v1.18.4/go.mod h1:WG8HUJKtDqXJM3+CNZeN+2wvdcJb5vprKo01fr1KQW4=
-github.com/aws/aws-sdk-go-v2/service/sso v1.11.9/go.mod h1:UqRD9bBt15P0ofRyDZX6CfsIqPpzeHOhZKWzgSuAzpo=
+github.com/aws/aws-sdk-go-v2/service/kms v1.18.9/go.mod h1:8sR6O18d56mlJf0VkYD7mOtrBoM//8eym7FcfG1t9Sc=
github.com/aws/aws-sdk-go-v2/service/sso v1.11.16/go.mod h1:mS5xqLZc/6kc06IpXn5vRxdLaED+jEuaSRv5BxtnsiY=
-github.com/aws/aws-sdk-go-v2/service/sts v1.16.7/go.mod h1:lVxTdiiSHY3jb1aeg+BBFtDzZGSUCv6qaNOyEGCJ1AY=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.21/go.mod h1:q8nYq51W3gpZempYsAD83fPRlrOTMCwN+Ahg4BKFTXQ=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.3/go.mod h1:+IF75RMJh0+zqTGXGshyEGRsU2ImqWv6UuHGkHl6kEo=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.13/go.mod h1:Ru3QVMLygVs/07UQ3YDur1AQZZp2tUNje8wfloFttC0=
-github.com/aws/smithy-go v1.11.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/aws/aws-sdk-go-v2/service/sts v1.16.17/go.mod h1:bQujK1n0V1D1Gz5uII1jaB1WDvhj4/T3tElsJnVXCR0=
github.com/aws/smithy-go v1.12.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/aws/smithy-go v1.13.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
github.com/beeker1121/goque v1.0.3-0.20191103205551-d618510128af/go.mod h1:84CWnaDz4g1tEVnFLnuBigmGK15oPohy0RfvSN8d4eg=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
@@ -391,7 +388,6 @@ github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oM
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
-github.com/containerd/stargz-snapshotter/estargz v0.11.4/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
github.com/containerd/stargz-snapshotter/estargz v0.12.0 h1:idtwRTLjk2erqiYhPWy2L844By8NRFYEwYHcXhoIWPM=
github.com/containerd/stargz-snapshotter/estargz v0.12.0/go.mod h1:AIQ59TewBFJ4GOPEQXujcrJ/EKxh5xXZegW1rkR1P/M=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
@@ -421,16 +417,15 @@ github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRD
github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
github.com/containernetworking/plugins v1.1.1 h1:+AGfFigZ5TiQH00vhR8qPeSatj53eNGz0C1d3wVYlHE=
github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
-github.com/containers/buildah v1.27.1-0.20220907121344-97a52b13bb27 h1:LRgKJ/JUd6iTocPg/q7oMZ9ilnbew50JXClXgiEoR9Q=
-github.com/containers/buildah v1.27.1-0.20220907121344-97a52b13bb27/go.mod h1:0iWhIkE70dkoVuwpmZy5/DXpBdI3C23iYmBQccTDWMU=
-github.com/containers/common v0.49.1/go.mod h1:ueM5hT0itKqCQvVJDs+EtjornAQtrHYxQJzP2gxeGIg=
-github.com/containers/common v0.49.2-0.20220909190843-e5685792b5d7 h1:iSrqOya92AllZSA7y64Aamfcr4iOxgf4iatc9uFeL0U=
-github.com/containers/common v0.49.2-0.20220909190843-e5685792b5d7/go.mod h1:HaPvle8BvLTyjtY9B4HJoNCl60DpHwCDLA2FsZTWaak=
+github.com/containers/buildah v1.27.1-0.20220921131114-d3064796af36 h1:LTSEbPUbs0slJSJ+IH6atAjYDe0IDzA0sPgBLjT1yAo=
+github.com/containers/buildah v1.27.1-0.20220921131114-d3064796af36/go.mod h1:cY3pGPyMmrNp/sEDK8ESoBOf4hoNovptZSI0oyo8eQM=
+github.com/containers/common v0.49.2-0.20220920205255-8062f81c5497 h1:LB9SxcAglqSAHiiHGacN1Abi0ZL9haJpQ1numVlqtxM=
+github.com/containers/common v0.49.2-0.20220920205255-8062f81c5497/go.mod h1:ZnhOPR/07UOkfIg5bezUpBilGjxEUdaeoUpu7gRBGc0=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
-github.com/containers/image/v5 v5.22.0/go.mod h1:D8Ksv2RNB8qLJ7xe1P3rgJJOSQpahA6amv2Ax++/YO4=
-github.com/containers/image/v5 v5.22.1-0.20220907162003-651744379993 h1:Dsga+pCSq+2JwIaHaxilCWrLpzSQIbBdovTCHb8zPag=
github.com/containers/image/v5 v5.22.1-0.20220907162003-651744379993/go.mod h1:/Ruurd87C6Ap45t1PWNOD8+SGwiZbk79XCgs1iUTvYA=
+github.com/containers/image/v5 v5.22.1-0.20220919112403-fe51f7ffca50 h1:tUKgZs27sh7EdolUQdjcyOF2+XPmCJF6/797+mkJZ0M=
+github.com/containers/image/v5 v5.22.1-0.20220919112403-fe51f7ffca50/go.mod h1:yA5N2XShBHbvlhp51kNABiFPmnRLpDRHV++zmMQn3eE=
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -445,8 +440,8 @@ github.com/containers/psgo v1.7.3/go.mod h1:PfaNzzHmMb8M9/blPgyD4BB3ZEj/0ApZIxN6
github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
github.com/containers/storage v1.42.0/go.mod h1:JiUJwOgOo1dr2DdOUc1MRe2GCAXABYoYmOdPF8yvH78=
github.com/containers/storage v1.42.1-0.20220907083030-5aff7f62e8d0/go.mod h1:nj2fW3rgwkr6toBVFzv5OqUYs1kowX+AMiPjgv2UXN0=
-github.com/containers/storage v1.42.1-0.20220911223137-e11b246de159 h1:euFlaTBhuBLuUUQK4wGXjruNUh24ZbdQLREvLz15r9o=
-github.com/containers/storage v1.42.1-0.20220911223137-e11b246de159/go.mod h1:nj2fW3rgwkr6toBVFzv5OqUYs1kowX+AMiPjgv2UXN0=
+github.com/containers/storage v1.42.1-0.20220919112236-8a581aac3bdf h1:CnGYVAFSZgwYdYHnJJhVnhxtrggWsgwooG/Be1LxWGI=
+github.com/containers/storage v1.42.1-0.20220919112236-8a581aac3bdf/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -456,6 +451,7 @@ github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmeka
github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-oidc/v3 v3.2.0/go.mod h1:rEJ/idjfUyfkBit1eI1fvyr+64/g9dcKpAm8MJMesvo=
+github.com/coreos/go-oidc/v3 v3.4.0/go.mod h1:eHUXhZtXPQLgEaDrOVTgwbgmz1xGOkJNye6h3zkD2Pw=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
@@ -509,7 +505,6 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh
github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/cli v20.10.16+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
@@ -518,7 +513,6 @@ github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.16+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.18+incompatible h1:SN84VYXTBNGn92T/QwIRPlum9zfemfitN7pbsp26WSc=
github.com/docker/docker v20.10.18+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
@@ -544,7 +538,6 @@ github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZ
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/eggsampler/acme/v3 v3.2.1/go.mod h1:/qh0rKC/Dh7Jj+p4So7DbWmFNzC4dpcpK53r226Fhuo=
github.com/eggsampler/acme/v3 v3.3.0/go.mod h1:/qh0rKC/Dh7Jj+p4So7DbWmFNzC4dpcpK53r226Fhuo=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
@@ -656,10 +649,9 @@ github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTM
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
-github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
-github.com/go-rod/rod v0.107.3/go.mod h1:4SqYRUrcc4dSr9iT36YRZ4hdUAPg3A0O8RhxAMh0eCQ=
github.com/go-rod/rod v0.109.1/go.mod h1:GZDtmEs6RpF6kBRYpGCZXxXlKNneKVPiKOjaMbmVVjE=
+github.com/go-rod/rod v0.109.3/go.mod h1:GZDtmEs6RpF6kBRYpGCZXxXlKNneKVPiKOjaMbmVVjE=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
@@ -818,10 +810,10 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
-github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
-github.com/google/go-containerregistry v0.10.0/go.mod h1:C7uwbB1QUAtvnknyd3ethxJRd4gtEjU/9WLXzckfI1Y=
github.com/google/go-containerregistry v0.11.0 h1:Xt8x1adcREjFcmDoDK8OdOsjxu90PHkGuwNP8GiHMLM=
github.com/google/go-containerregistry v0.11.0/go.mod h1:BBaYtsHPHA42uEgAvd/NejvAfPSlz281sJWqupjSxfk=
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
@@ -931,7 +923,6 @@ github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrj
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-hclog v1.1.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v1.2.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
@@ -946,7 +937,6 @@ github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ3
github.com/hashicorp/go-plugin v1.4.4/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
-github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
@@ -968,7 +958,6 @@ github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -986,7 +975,6 @@ github.com/hashicorp/vault/api v1.7.2/go.mod h1:xbfA+1AvxFseDzxxdWaL0uO99n1+tndu
github.com/hashicorp/vault/sdk v0.5.1/go.mod h1:DoGraE9kKGNcVgPmTuX357Fm6WAx1Okvde8Vp3dPDoU=
github.com/hashicorp/vault/sdk v0.5.3/go.mod h1:DoGraE9kKGNcVgPmTuX357Fm6WAx1Okvde8Vp3dPDoU=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
-github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
github.com/hashicorp/yamux v0.1.0/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
github.com/honeycombio/beeline-go v1.1.1/go.mod h1:kN0cfUGBMfA87DyCYbiiLoSzWsnw3bluZvNEWtatHxk=
github.com/honeycombio/beeline-go v1.9.0 h1:MM/20HBFE2AKno0N9bOgEicSHHiTkTklGdtjdtAEWbk=
@@ -1131,12 +1119,11 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.15.4/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.15.8/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
-github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.10 h1:Ai8UzuomSCDw90e1qNMtb15msBXsNpH6gzkkENQNcJo=
+github.com/klauspost/compress v1.15.10/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -1167,7 +1154,6 @@ github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3
github.com/ldez/gomoddirectives v0.2.2/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0=
github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
-github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e/go.mod h1:Bl3mfF2LHYepsU2XfzMceIglyByfPe1IFAXtO+p37Qk=
github.com/letsencrypt/boulder v0.0.0-20220723181115-27de4befb95e h1:2ba+yBBeT8ZFyZjRLPDKvkqVrWX4CCYAuR6nuJGojD0=
github.com/letsencrypt/boulder v0.0.0-20220723181115-27de4befb95e/go.mod h1:54WQpg5QI0mpRhxoj9bxysLqA5WJylVsLtXOrb3zAiU=
github.com/letsencrypt/challtestsrv v1.2.1/go.mod h1:Ur4e4FvELUXLGhkMztHOsPIsvGxD/kzSJninOrkM+zc=
@@ -1260,7 +1246,6 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
-github.com/miekg/dns v1.1.45/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
@@ -1268,6 +1253,8 @@ github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mistifyio/go-zfs/v3 v3.0.0 h1:J5QK618xRcXnQYZ2GE5FdmpS1ufIrWue+lR/mpe6/14=
+github.com/mistifyio/go-zfs/v3 v3.0.0/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
@@ -1368,8 +1355,9 @@ github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042
github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
-github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU=
github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
+github.com/onsi/ginkgo/v2 v2.2.0 h1:3ZNA3L1c5FYDFTTxbFeVGGD8jYvjYauHD30YgLxVsNI=
+github.com/onsi/ginkgo/v2 v2.2.0/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -1384,7 +1372,6 @@ github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
-github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
github.com/onsi/gomega v1.20.2 h1:8uQq0zMgLEfa0vRrrBgaJF2gyW9Da9BmfGV+OyUzfkY=
github.com/onsi/gomega v1.20.2/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=
@@ -1558,7 +1545,6 @@ github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624
github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY=
github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
-github.com/secure-systems-lab/go-securesystemslib v0.3.1/go.mod h1:o8hhjkbNl2gOamKUA/eNW3xUrntHT9L4W89W1nfj43U=
github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs=
github.com/securego/gosec/v2 v2.9.1/go.mod h1:oDcDLcatOJxkCGaCaq8lua1jTnYf6Sou4wdiJ1n4iHc=
github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
@@ -1573,9 +1559,9 @@ github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFR
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sigstore/sigstore v1.3.1-0.20220629021053-b95fc0d626c1/go.mod h1:y83NePRM98MJpbGgBgi54UZduhG0aD7lYngAVCx+i/E=
-github.com/sigstore/sigstore v1.4.0 h1:5A3eUhbSQkhiqJNUPi/2UMKdTyb3NKfWcVjaTBkkaJk=
github.com/sigstore/sigstore v1.4.0/go.mod h1:z3kt1jm2A39M+g7emkQ8jdErL/haCMEjkNxvqTf41/k=
+github.com/sigstore/sigstore v1.4.1 h1:e/tfXseQRymIjgiykskciGrp75AZVCfYokZ2r9tg5vw=
+github.com/sigstore/sigstore v1.4.1/go.mod h1:4+s4d6oTDdoQkf5lwpZBoOlWWV+hXhur1my9WdN5PjU=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -1650,11 +1636,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
-github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/sylabs/sif/v2 v2.7.1/go.mod h1:bBse2nEFd3yHkmq6KmAOFEWQg5LdFYiQUdVcgamxlc8=
github.com/sylabs/sif/v2 v2.7.2 h1:eCxtl2ub9fPfrO7g2JPagn6HKDhv+Kl92Jz6+ww2Y1Q=
github.com/sylabs/sif/v2 v2.7.2/go.mod h1:LQOdYXC9a8i7BleTKRw9lohi0rTbXkJOeS9u0ebvgyM=
github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ=
@@ -1672,10 +1656,10 @@ github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0
github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0=
github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY=
github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8=
-github.com/theupdateframework/go-tuf v0.3.0/go.mod h1:E5XP0wXitrFUHe4b8cUcAAdxBW4LbfnqF4WXXGLgWNo=
github.com/theupdateframework/go-tuf v0.3.1/go.mod h1:lhHZ3Vt2pdAh15h0Cc6gWdlI+Okn2ZznD3q/cNjd5jw=
-github.com/theupdateframework/go-tuf v0.4.0 h1:KTFzOsIdtmMe4KTEOEcDdD68uXYf2hDYq+Bh+hU7yWU=
github.com/theupdateframework/go-tuf v0.4.0/go.mod h1:kfQTX2LhNeK/cp03OBiZHoua0Pp2l9w4ShRwFtc0oKg=
+github.com/theupdateframework/go-tuf v0.5.0 h1:aQ7i9CBw4q9QEZifCaW6G8qGQwoN23XGaZkOA+F50z4=
+github.com/theupdateframework/go-tuf v0.5.0/go.mod h1:vAqWV3zEs89byeFsAYoh/Q14vJTgJkHwnnRCWBBBINY=
github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
@@ -1718,7 +1702,6 @@ github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/V
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
-github.com/vbauerster/mpb/v7 v7.4.2/go.mod h1:UmOiIUI8aPqWXIps0ciik3RKMdzx7+ooQpq+fBcXwBA=
github.com/vbauerster/mpb/v7 v7.5.2/go.mod h1:UmOiIUI8aPqWXIps0ciik3RKMdzx7+ooQpq+fBcXwBA=
github.com/vbauerster/mpb/v7 v7.5.3 h1:BkGfmb6nMrrBQDFECR/Q7RkKCw7ylMetCb4079CGs4w=
github.com/vbauerster/mpb/v7 v7.5.3/go.mod h1:i+h4QY6lmLvBNK2ah1fSreiw3ajskRlBp9AhY/PnuOE=
@@ -1742,7 +1725,6 @@ github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgq
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
github.com/weppos/publicsuffix-go v0.15.1-0.20210807195340-dc689ff0bb59/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE=
-github.com/weppos/publicsuffix-go v0.15.1-0.20220329081811-9a40b608a236/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE=
github.com/weppos/publicsuffix-go v0.15.1-0.20220413065649-906f534b73a4/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
@@ -1759,12 +1741,10 @@ github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1z
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc=
github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18=
-github.com/ysmood/got v0.31.2/go.mod h1:pE1l4LOwOBhQg6A/8IAatkGp7uZjnalzrZolnlhhMgY=
github.com/ysmood/got v0.31.3/go.mod h1:pE1l4LOwOBhQg6A/8IAatkGp7uZjnalzrZolnlhhMgY=
github.com/ysmood/gotrace v0.6.0/go.mod h1:TzhIG7nHDry5//eYZDYcTzuJLYQIkykJzCRIo4/dzQM=
github.com/ysmood/gson v0.7.1/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg=
github.com/ysmood/gson v0.7.2/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg=
-github.com/ysmood/leakless v0.7.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
github.com/ysmood/leakless v0.8.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
@@ -1890,11 +1870,10 @@ golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 h1:Y/gsMcFOcR+6S6f3YeMKl5g+dZMEWqcz5Czj/GWYbkM=
+golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -2003,7 +1982,6 @@ golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -2013,13 +1991,13 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220524220425-1d687d428aca/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220708220712-1185a9018129/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b h1:ZmngSVLe/wycRns9MKikG9OWIEjGcGAkacif7oYQaUY=
+golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -2040,10 +2018,10 @@ golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
golang.org/x/oauth2 v0.0.0-20220718184931-c8730f7fcb92/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -2056,7 +2034,6 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -2211,8 +2188,10 @@ golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220823224334-20c2bfdbfe24/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220907062415-87db552b00fd/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2 h1:wM1k/lXfpc5HdkJJyW9GELpd8ERGdnh8sMGL6Gzq3Ho=
golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
@@ -2241,7 +2220,6 @@ golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 h1:ftMN5LMiBFjbzleLqtoBZk7KdJwhuybIU+FckUHgoyQ=
golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -2417,6 +2395,7 @@ google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3p
google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
google.golang.org/api v0.92.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
+google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -2606,7 +2585,6 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
diff --git a/hack/libsubid_tag.sh b/hack/libsubid_tag.sh
index 31412b3e6..137826484 100755
--- a/hack/libsubid_tag.sh
+++ b/hack/libsubid_tag.sh
@@ -5,7 +5,7 @@ fi
tmpdir="$PWD/tmp.$RANDOM"
mkdir -p "$tmpdir"
trap 'rm -fr "$tmpdir"' EXIT
-cc -o "$tmpdir"/libsubid_tag -l subid -x c - > /dev/null 2> /dev/null << EOF
+cc -o "$tmpdir"/libsubid_tag -x c - -l subid > /dev/null 2> /dev/null << EOF
#include <shadow/subid.h>
#include <stdio.h>
#include <stdlib.h>
diff --git a/libpod/events.go b/libpod/events.go
index 2f9799114..31a857a7d 100644
--- a/libpod/events.go
+++ b/libpod/events.go
@@ -34,6 +34,7 @@ func (c *Container) newContainerEvent(status events.Status) {
e.Details = events.Details{
ID: e.ID,
+ PodID: c.PodID(),
Attributes: c.Labels(),
}
@@ -59,6 +60,7 @@ func (c *Container) newContainerExitedEvent(exitCode int32) {
e.Name = c.Name()
e.Image = c.config.RootfsImageName
e.Type = events.Container
+ e.PodID = c.PodID()
e.ContainerExitCode = int(exitCode)
e.Details = events.Details{
diff --git a/libpod/events/config.go b/libpod/events/config.go
index 4ea45a00e..28bc87a34 100644
--- a/libpod/events/config.go
+++ b/libpod/events/config.go
@@ -50,6 +50,8 @@ type Event struct {
type Details struct {
// ID is the event ID
ID string
+ // PodID is the ID of the pod associated with the container.
+ PodID string `json:",omitempty"`
// Attributes can be used to describe specifics about the event
// in the case of a container event, labels for example
Attributes map[string]string
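
The json:",omitempty" tag on the new field matters for backwards compatibility: events for containers that are not in a pod serialize exactly as before, and the key only appears when a pod ID is actually set. A minimal standalone sketch of that behavior (the details struct below is trimmed down purely for illustration):

package main

import (
	"encoding/json"
	"fmt"
)

// details mirrors just the fields of events.Details relevant here.
type details struct {
	ID    string
	PodID string `json:",omitempty"`
}

func main() {
	// No pod: PodID is the zero value, so omitempty drops the key and the
	// serialized event details look the same as before this change.
	solo, _ := json.Marshal(details{ID: "abc123"})
	fmt.Println(string(solo)) // {"ID":"abc123"}

	// In a pod: the key appears alongside the container ID.
	inPod, _ := json.Marshal(details{ID: "abc123", PodID: "def456"})
	fmt.Println(string(inPod)) // {"ID":"abc123","PodID":"def456"}
}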
diff --git a/libpod/events/events.go b/libpod/events/events.go
index 764481e51..2105a3b89 100644
--- a/libpod/events/events.go
+++ b/libpod/events/events.go
@@ -76,7 +76,13 @@ func (e *Event) ToHumanReadable(truncate bool) string {
}
switch e.Type {
case Container, Pod:
- humanFormat = fmt.Sprintf("%s %s %s %s (image=%s, name=%s, health_status=%s", e.Time, e.Type, e.Status, id, e.Image, e.Name, e.HealthStatus)
+ humanFormat = fmt.Sprintf("%s %s %s %s (image=%s, name=%s", e.Time, e.Type, e.Status, id, e.Image, e.Name)
+ if e.PodID != "" {
+ humanFormat += fmt.Sprintf(", pod_id=%s", e.PodID)
+ }
+ if e.HealthStatus != "" {
+ humanFormat += fmt.Sprintf(", health_status=%s", e.HealthStatus)
+ }
// check if the container has labels and add it to the output
if len(e.Attributes) > 0 {
for k, v := range e.Attributes {
diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go
index 4986502a2..e303a205d 100644
--- a/libpod/events/journal_linux.go
+++ b/libpod/events/journal_linux.go
@@ -50,6 +50,9 @@ func (e EventJournalD) Write(ee Event) error {
if ee.ContainerExitCode != 0 {
m["PODMAN_EXIT_CODE"] = strconv.Itoa(ee.ContainerExitCode)
}
+ if ee.PodID != "" {
+ m["PODMAN_POD_ID"] = ee.PodID
+ }
// If we have container labels, we need to convert them to a string so they
// can be recorded with the event
if len(ee.Details.Attributes) > 0 {
@@ -161,6 +164,7 @@ func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) {
case Container, Pod:
newEvent.ID = entry.Fields["PODMAN_ID"]
newEvent.Image = entry.Fields["PODMAN_IMAGE"]
+ newEvent.PodID = entry.Fields["PODMAN_POD_ID"]
if code, ok := entry.Fields["PODMAN_EXIT_CODE"]; ok {
intCode, err := strconv.Atoi(code)
if err != nil {
@@ -179,7 +183,7 @@ func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) {
// if we have labels, add them to the event
if len(labels) > 0 {
- newEvent.Details = Details{Attributes: labels}
+ newEvent.Attributes = labels
}
}
newEvent.HealthStatus = entry.Fields["PODMAN_HEALTH_STATUS"]
diff --git a/pkg/api/handlers/compat/images_build.go b/pkg/api/handlers/compat/images_build.go
index 287011798..d4c8c0743 100644
--- a/pkg/api/handlers/compat/images_build.go
+++ b/pkg/api/handlers/compat/images_build.go
@@ -130,6 +130,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
Secrets string `schema:"secrets"`
SecurityOpt string `schema:"securityopt"`
ShmSize int `schema:"shmsize"`
+ SkipUnusedStages bool `schema:"skipunusedstages"`
Squash bool `schema:"squash"`
TLSVerify bool `schema:"tlsVerify"`
Tags []string `schema:"t"`
@@ -138,12 +139,13 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
Ulimits string `schema:"ulimits"`
UnsetEnvs []string `schema:"unsetenv"`
}{
- Dockerfile: "Dockerfile",
- IdentityLabel: true,
- Registry: "docker.io",
- Rm: true,
- ShmSize: 64 * 1024 * 1024,
- TLSVerify: true,
+ Dockerfile: "Dockerfile",
+ IdentityLabel: true,
+ Registry: "docker.io",
+ Rm: true,
+ ShmSize: 64 * 1024 * 1024,
+ TLSVerify: true,
+ SkipUnusedStages: true,
}
decoder := r.Context().Value(api.DecoderKey).(*schema.Decoder)
@@ -675,6 +677,7 @@ func BuildImage(w http.ResponseWriter, r *http.Request) {
RemoveIntermediateCtrs: query.Rm,
ReportWriter: reporter,
RusageLogFile: query.RusageLogFile,
+ SkipUnusedStages: types.NewOptionalBool(query.SkipUnusedStages),
Squash: query.Squash,
SystemContext: systemContext,
Target: query.Target,
diff --git a/pkg/bindings/images/build.go b/pkg/bindings/images/build.go
index 260d977a8..f8552cddb 100644
--- a/pkg/bindings/images/build.go
+++ b/pkg/bindings/images/build.go
@@ -233,6 +233,14 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO
if options.CacheFrom != nil {
params.Set("cachefrom", options.CacheFrom.String())
}
+
+ switch options.SkipUnusedStages {
+ case types.OptionalBoolTrue:
+ params.Set("skipunusedstages", "1")
+ case types.OptionalBoolFalse:
+ params.Set("skipunusedstages", "0")
+ }
+
if options.CacheTo != nil {
params.Set("cacheto", options.CacheTo.String())
}
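
SkipUnusedStages travels as a three-state types.OptionalBool rather than a plain bool, so the bindings only put the parameter on the wire when a caller set it explicitly; otherwise the query string is left alone and the server-side default from the handler above (true) wins. A minimal standalone sketch of that encoding, with a hypothetical setOptionalBool helper:

package main

import (
	"fmt"
	"net/url"

	"github.com/containers/image/v5/types"
)

// setOptionalBool encodes a three-state flag: only an explicit true or
// false ends up in the query string, so an undefined value defers to the
// API server's default.
func setOptionalBool(params url.Values, key string, value types.OptionalBool) {
	switch value {
	case types.OptionalBoolTrue:
		params.Set(key, "1")
	case types.OptionalBoolFalse:
		params.Set(key, "0")
	}
}

func main() {
	params := url.Values{}

	setOptionalBool(params, "skipunusedstages", types.OptionalBoolUndefined)
	fmt.Printf("%q\n", params.Encode()) // "" – nothing sent, server default applies

	setOptionalBool(params, "skipunusedstages", types.OptionalBoolFalse)
	fmt.Printf("%q\n", params.Encode()) // "skipunusedstages=0"
}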
diff --git a/pkg/domain/entities/events.go b/pkg/domain/entities/events.go
index de218b285..34a6fe048 100644
--- a/pkg/domain/entities/events.go
+++ b/pkg/domain/entities/events.go
@@ -14,7 +14,7 @@ type Event struct {
// TODO: it would be nice to have full control over the types at some
// point and fork such Docker types.
dockerEvents.Message
- HealthStatus string
+ HealthStatus string `json:",omitempty"`
}
// ConvertToLibpodEvent converts an entities event to a libpod one.
@@ -34,6 +34,7 @@ func ConvertToLibpodEvent(e Event) *libpodEvents.Event {
image := e.Actor.Attributes["image"]
name := e.Actor.Attributes["name"]
details := e.Actor.Attributes
+ podID := e.Actor.Attributes["podId"]
delete(details, "image")
delete(details, "name")
delete(details, "containerExitCode")
@@ -47,6 +48,7 @@ func ConvertToLibpodEvent(e Event) *libpodEvents.Event {
Type: t,
HealthStatus: e.HealthStatus,
Details: libpodEvents.Details{
+ PodID: podID,
Attributes: details,
},
}
@@ -61,6 +63,7 @@ func ConvertToEntitiesEvent(e libpodEvents.Event) *Event {
attributes["image"] = e.Image
attributes["name"] = e.Name
attributes["containerExitCode"] = strconv.Itoa(e.ContainerExitCode)
+ attributes["podId"] = e.PodID
message := dockerEvents.Message{
// Compatibility with clients that still look for deprecated API elements
Status: e.Status.String(),
diff --git a/pkg/domain/infra/abi/play.go b/pkg/domain/infra/abi/play.go
index 847e81e69..bd9117f72 100644
--- a/pkg/domain/infra/abi/play.go
+++ b/pkg/domain/infra/abi/play.go
@@ -10,6 +10,7 @@ import (
"path/filepath"
"strconv"
"strings"
+ "sync"
buildahDefine "github.com/containers/buildah/define"
"github.com/containers/common/libimage"
@@ -698,9 +699,24 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
fmt.Println(playKubePod.ContainerErrors)
}
- // Wait for each proxy to receive a READY message.
- for _, proxy := range sdNotifyProxies {
- if err := proxy.WaitAndClose(); err != nil {
+ // Wait for each proxy to receive a READY message. Use a wait
+ // group to prevent the potential for ABBA kinds of deadlocks.
+ var wg sync.WaitGroup
+ errors := make([]error, len(sdNotifyProxies))
+ for i := range sdNotifyProxies {
+ wg.Add(1)
+ go func(i int) {
+ err := sdNotifyProxies[i].WaitAndClose()
+ if err != nil {
+ err = fmt.Errorf("waiting for sd-notify proxy: %w", err)
+ }
+ errors[i] = err
+ wg.Done()
+ }(i)
+ }
+ wg.Wait()
+ for _, err := range errors {
+ if err != nil {
return nil, err
}
}
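
Waiting on all proxies concurrently instead of one after another avoids the ordering-dependent (ABBA) deadlock the comment mentions, and the collection pattern — a pre-sized error slice where every goroutine writes only its own index, so no mutex is needed — generalizes to any set of blocking waits. A minimal standalone sketch with a hypothetical waitAll helper:

package main

import (
	"fmt"
	"sync"
	"time"
)

// waitAll runs every wait function concurrently and returns the first
// non-nil error. Each goroutine writes only its own slice index, so no
// extra locking is required.
func waitAll(waiters []func() error) error {
	var wg sync.WaitGroup
	errs := make([]error, len(waiters))
	for i := range waiters {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			errs[i] = waiters[i]()
		}(i)
	}
	wg.Wait()
	for _, err := range errs {
		if err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := waitAll([]func() error{
		func() error { time.Sleep(10 * time.Millisecond); return nil },
		func() error { return fmt.Errorf("proxy 2: no READY message") },
	})
	fmt.Println(err)
}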
diff --git a/pkg/systemd/notifyproxy/notifyproxy.go b/pkg/systemd/notifyproxy/notifyproxy.go
index ea1522bb3..4b92d9e6c 100644
--- a/pkg/systemd/notifyproxy/notifyproxy.go
+++ b/pkg/systemd/notifyproxy/notifyproxy.go
@@ -1,6 +1,7 @@
package notifyproxy
import (
+ "context"
"errors"
"fmt"
"io"
@@ -109,48 +110,75 @@ func (p *NotifyProxy) WaitAndClose() error {
}
}()
- const bufferSize = 1024
- sBuilder := strings.Builder{}
- for {
- // Set a read deadline of one second such that we achieve a
- // non-blocking read and can check if the container has already
- // stopped running; in that case no READY message will be send
- // and we're done.
- if err := p.connection.SetReadDeadline(time.Now().Add(time.Second)); err != nil {
- return err
- }
-
+ // Since reading from the connection is blocking, we need to spin up two
+ // goroutines. One waiting for the `READY` message, the other waiting
+ // for the container to stop running.
+ errorChan := make(chan error, 1)
+ readyChan := make(chan bool, 1)
+
+ go func() {
+ // Read until the `READY` message is received or the connection
+ // is closed.
+ const bufferSize = 1024
+ sBuilder := strings.Builder{}
for {
- buffer := make([]byte, bufferSize)
- num, err := p.connection.Read(buffer)
- if err != nil {
- if !errors.Is(err, os.ErrDeadlineExceeded) && !errors.Is(err, io.EOF) {
- return err
+ for {
+ buffer := make([]byte, bufferSize)
+ num, err := p.connection.Read(buffer)
+ if err != nil {
+ if !errors.Is(err, io.EOF) {
+ errorChan <- err
+ return
+ }
+ }
+ sBuilder.Write(buffer[:num])
+ if num != bufferSize || buffer[num-1] == '\n' {
+ // Break as we read an entire line that
+ // we can inspect for the `READY`
+ // message.
+ break
}
}
- sBuilder.Write(buffer[:num])
- if num != bufferSize || buffer[num-1] == '\n' {
- break
- }
- }
- for _, line := range strings.Split(sBuilder.String(), "\n") {
- if line == daemon.SdNotifyReady {
- return nil
+ for _, line := range strings.Split(sBuilder.String(), "\n") {
+ if line == daemon.SdNotifyReady {
+ readyChan <- true
+ return
+ }
}
+ sBuilder.Reset()
}
- sBuilder.Reset()
+ }()
- if p.container == nil {
- continue
- }
+ if p.container != nil {
+ // Create a cancellable context to make sure the goroutine
+ // below terminates.
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ go func() {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ state, err := p.container.State()
+ if err != nil {
+ errorChan <- err
+ return
+ }
+ if state != define.ContainerStateRunning {
+ errorChan <- fmt.Errorf("%w: %s", ErrNoReadyMessage, p.container.ID())
+ return
+ }
+ time.Sleep(time.Second)
+ }
+ }()
+ }
- state, err := p.container.State()
- if err != nil {
- return err
- }
- if state != define.ContainerStateRunning {
- return fmt.Errorf("%w: %s", ErrNoReadyMessage, p.container.ID())
- }
+ // Wait for the ready/error channel.
+ select {
+ case <-readyChan:
+ return nil
+ case err := <-errorChan:
+ return err
}
}
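
The rewritten WaitAndClose splits the work between two producers — a goroutine that reads the connection until a READY line arrives and, when a container is attached, a goroutine that polls its state — while the caller simply selects on whichever channel fires first. Both channels are buffered with capacity 1 so the producer that loses the race can still send and return instead of blocking forever. A stripped-down sketch of the same pattern, with hypothetical ready and running callbacks standing in for the socket read and the container state check:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitForReady blocks until either a READY signal or an error arrives.
// Buffered channels let the losing producer send without blocking, and the
// cancellable context stops the state poller once the function returns.
func waitForReady(ready func() bool, running func() (bool, error)) error {
	readyChan := make(chan bool, 1)
	errorChan := make(chan error, 1)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	go func() {
		// Stands in for reading the notify socket until "READY=1" shows up.
		for !ready() {
			time.Sleep(10 * time.Millisecond)
		}
		readyChan <- true
	}()

	go func() {
		// Stands in for periodically checking the container state.
		for {
			select {
			case <-ctx.Done():
				return
			default:
			}
			ok, err := running()
			if err != nil {
				errorChan <- err
				return
			}
			if !ok {
				errorChan <- errors.New("container stopped before sending READY")
				return
			}
			time.Sleep(10 * time.Millisecond)
		}
	}()

	select {
	case <-readyChan:
		return nil
	case err := <-errorChan:
		return err
	}
}

func main() {
	start := time.Now()
	err := waitForReady(
		func() bool { return time.Since(start) > 50*time.Millisecond },
		func() (bool, error) { return true, nil },
	)
	fmt.Println(err) // <nil>
}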
diff --git a/test/apiv2/27-containersEvents.at b/test/apiv2/27-containersEvents.at
index e0a66e0ac..a5b5b24a3 100644
--- a/test/apiv2/27-containersEvents.at
+++ b/test/apiv2/27-containersEvents.at
@@ -20,7 +20,7 @@ t GET "libpod/events?stream=false&since=$START" 200 \
t GET "libpod/events?stream=false&since=$START" 200 \
'select(.status | contains("start")).Action=start' \
- 'select(.status | contains("start")).HealthStatus='\
+ 'select(.status | contains("start")).HealthStatus=null'\
# compat api, uses status=die (#12643)
t GET "events?stream=false&since=$START" 200 \
diff --git a/test/buildah-bud/apply-podman-deltas b/test/buildah-bud/apply-podman-deltas
index 999f36bf9..1ab409359 100755
--- a/test/buildah-bud/apply-podman-deltas
+++ b/test/buildah-bud/apply-podman-deltas
@@ -70,7 +70,10 @@ function _skip() {
for t in "$@"; do
if fgrep -qx "@test \"$t\" {" $BUD; then
$ECHO "@test \"$t\" : $skip \"$reason\""
+ # Escape slash in test name, 'custom files in /run/'
t=${t//\//\\/}
+ # Escape star in test name, 'bud with --dns* flags'
+ t=${t//\*/\\\*}
sed -i -e "/^\@test \"$t\" {/ a \ \ $skip \"$reason\"" $BUD
else
warn "[$skip] Did not find test \"$t\" in $BUD"
diff --git a/test/e2e/events_test.go b/test/e2e/events_test.go
index bba42c3f4..c76919581 100644
--- a/test/e2e/events_test.go
+++ b/test/e2e/events_test.go
@@ -212,6 +212,16 @@ var _ = Describe("Podman events", func() {
Expect(result).Should(Exit(0))
Expect(result.OutputToStringArray()).To(HaveLen(1))
Expect(result.OutputToString()).To(ContainSubstring("create"))
+
+ ctrName := "testCtr"
+ run := podmanTest.Podman([]string{"create", "--pod", id, "--name", ctrName, ALPINE, "top"})
+ run.WaitWithDefaultTimeout()
+ Expect(run).Should(Exit(0))
+
+ result2 := podmanTest.Podman([]string{"events", "--stream=false", "--filter", fmt.Sprintf("container=%s", ctrName), "--since", "30s"})
+ result2.WaitWithDefaultTimeout()
+ Expect(result2).Should(Exit(0))
+ Expect(result2.OutputToString()).To(ContainSubstring(fmt.Sprintf("pod_id=%s", id)))
})
It("podman events health_status generated", func() {
@@ -229,7 +239,7 @@ var _ = Describe("Podman events", func() {
time.Sleep(1 * time.Second)
}
- result := podmanTest.Podman([]string{"events", "--stream=false", "--filter", "event=health_status"})
+ result := podmanTest.Podman([]string{"events", "--stream=false", "--filter", "event=health_status", "--since", "1m"})
result.WaitWithDefaultTimeout()
Expect(result).Should(Exit(0))
Expect(len(result.OutputToStringArray())).To(BeNumerically(">=", 1), "Number of health_status events")
diff --git a/test/e2e/secret_test.go b/test/e2e/secret_test.go
index 286815e67..668a4943c 100644
--- a/test/e2e/secret_test.go
+++ b/test/e2e/secret_test.go
@@ -96,6 +96,23 @@ var _ = Describe("Podman secret", func() {
Expect(inspect.OutputToString()).To(Equal(secrID))
})
+ It("podman secret inspect with --pretty", func() {
+ secretFilePath := filepath.Join(podmanTest.TempDir, "secret")
+ err := os.WriteFile(secretFilePath, []byte("mysecret"), 0755)
+ Expect(err).To(BeNil())
+
+ session := podmanTest.Podman([]string{"secret", "create", "a", secretFilePath})
+ session.WaitWithDefaultTimeout()
+ secrID := session.OutputToString()
+ Expect(session).Should(Exit(0))
+
+ inspect := podmanTest.Podman([]string{"secret", "inspect", "--pretty", secrID})
+ inspect.WaitWithDefaultTimeout()
+ Expect(inspect).Should(Exit(0))
+ Expect(inspect.OutputToString()).To(ContainSubstring("Name:"))
+ Expect(inspect.OutputToString()).To(ContainSubstring(secrID))
+ })
+
It("podman secret inspect multiple secrets", func() {
secretFilePath := filepath.Join(podmanTest.TempDir, "secret")
err := os.WriteFile(secretFilePath, []byte("mysecret"), 0755)
@@ -125,7 +142,6 @@ var _ = Describe("Podman secret", func() {
inspect := podmanTest.Podman([]string{"secret", "inspect", "bogus"})
inspect.WaitWithDefaultTimeout()
Expect(inspect).To(ExitWithError())
-
})
It("podman secret ls", func() {
diff --git a/test/system/070-build.bats b/test/system/070-build.bats
index 87979483e..b392fd8e9 100644
--- a/test/system/070-build.bats
+++ b/test/system/070-build.bats
@@ -246,7 +246,7 @@ EOF
# Now test COPY. That should fail.
sed -i -e 's/ADD/COPY/' $tmpdir/Dockerfile
run_podman 125 build -t copy_url $tmpdir
- is "$output" ".*error building at STEP .*: source can't be a URL for COPY"
+ is "$output" ".* building at STEP .*: source can't be a URL for COPY"
}
@@ -853,7 +853,7 @@ EOF
run_podman 125 build -t build_test --pull-never $tmpdir
is "$output" \
- ".*Error: error creating build container: quay.io/libpod/nosuchimage:nosuchtag: image not known" \
+ ".*Error: creating build container: quay.io/libpod/nosuchimage:nosuchtag: image not known" \
"--pull-never fails with expected error message"
}
@@ -988,7 +988,7 @@ COPY ./ ./
COPY subdir ./
EOF
run_podman 125 build -t build_test $tmpdir
- is "$output" ".*Error: error building at STEP \"COPY subdir ./\"" ".dockerignore was ignored"
+ is "$output" ".*Error: building at STEP \"COPY subdir ./\"" ".dockerignore was ignored"
}
@test "podman build .containerignore and .dockerignore test" {
diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go
index 1f820ea55..987313e18 100644
--- a/vendor/github.com/containers/buildah/add.go
+++ b/vendor/github.com/containers/buildah/add.go
@@ -105,7 +105,7 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string,
if lastModified != "" {
d, err := time.Parse(time.RFC1123, lastModified)
if err != nil {
- return fmt.Errorf("error parsing last-modified time: %w", err)
+ return fmt.Errorf("parsing last-modified time: %w", err)
}
date = d
}
@@ -117,17 +117,17 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string,
// we can figure out how much content there is.
f, err := ioutil.TempFile(mountpoint, "download")
if err != nil {
- return fmt.Errorf("error creating temporary file to hold %q: %w", src, err)
+ return fmt.Errorf("creating temporary file to hold %q: %w", src, err)
}
defer os.Remove(f.Name())
defer f.Close()
size, err = io.Copy(f, response.Body)
if err != nil {
- return fmt.Errorf("error writing %q to temporary file %q: %w", src, f.Name(), err)
+ return fmt.Errorf("writing %q to temporary file %q: %w", src, f.Name(), err)
}
_, err = f.Seek(0, io.SeekStart)
if err != nil {
- return fmt.Errorf("error setting up to read %q from temporary file %q: %w", src, f.Name(), err)
+ return fmt.Errorf("setting up to read %q from temporary file %q: %w", src, f.Name(), err)
}
responseBody = f
}
@@ -155,11 +155,11 @@ func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string,
}
err = tw.WriteHeader(&hdr)
if err != nil {
- return fmt.Errorf("error writing header: %w", err)
+ return fmt.Errorf("writing header: %w", err)
}
if _, err := io.Copy(tw, responseBody); err != nil {
- return fmt.Errorf("error writing content from %q to tar stream: %w", src, err)
+ return fmt.Errorf("writing content from %q to tar stream: %w", src, err)
}
return nil
@@ -208,13 +208,13 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
contextDir = string(os.PathSeparator)
currentDir, err = os.Getwd()
if err != nil {
- return fmt.Errorf("error determining current working directory: %w", err)
+ return fmt.Errorf("determining current working directory: %w", err)
}
} else {
if !filepath.IsAbs(options.ContextDir) {
contextDir, err = filepath.Abs(options.ContextDir)
if err != nil {
- return fmt.Errorf("error converting context directory path %q to an absolute path: %w", options.ContextDir, err)
+ return fmt.Errorf("converting context directory path %q to an absolute path: %w", options.ContextDir, err)
}
}
}
@@ -273,14 +273,14 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
if options.Chown != "" {
userUID, userGID, err = b.userForCopy(mountPoint, options.Chown)
if err != nil {
- return fmt.Errorf("error looking up UID/GID for %q: %w", options.Chown, err)
+ return fmt.Errorf("looking up UID/GID for %q: %w", options.Chown, err)
}
}
var chmodDirsFiles *os.FileMode
if options.Chmod != "" {
p, err := strconv.ParseUint(options.Chmod, 8, 32)
if err != nil {
- return fmt.Errorf("error parsing chmod %q: %w", options.Chmod, err)
+ return fmt.Errorf("parsing chmod %q: %w", options.Chmod, err)
}
perm := os.FileMode(p)
chmodDirsFiles = &perm
@@ -332,7 +332,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
}
destStats, err := copier.Stat(mountPoint, filepath.Join(mountPoint, b.WorkDir()), statOptions, []string{extractDirectory})
if err != nil {
- return fmt.Errorf("error checking on destination %v: %w", extractDirectory, err)
+ return fmt.Errorf("checking on destination %v: %w", extractDirectory, err)
}
if (len(destStats) == 0 || len(destStats[0].Globbed) == 0) && !destMustBeDirectory && destCanBeFile {
// destination doesn't exist - extract to parent and rename the incoming file to the destination's name
@@ -357,7 +357,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
pm, err := fileutils.NewPatternMatcher(options.Excludes)
if err != nil {
- return fmt.Errorf("error processing excludes list %v: %w", options.Excludes, err)
+ return fmt.Errorf("processing excludes list %v: %w", options.Excludes, err)
}
// Make sure that, if it's a symlink, we'll chroot to the target of the link;
@@ -365,7 +365,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
evalOptions := copier.EvalOptions{}
evaluated, err := copier.Eval(mountPoint, extractDirectory, evalOptions)
if err != nil {
- return fmt.Errorf("error checking on destination %v: %w", extractDirectory, err)
+ return fmt.Errorf("checking on destination %v: %w", extractDirectory, err)
}
extractDirectory = evaluated
@@ -383,7 +383,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
ChownNew: chownDirs,
}
if err := copier.Mkdir(mountPoint, extractDirectory, mkdirOptions); err != nil {
- return fmt.Errorf("error ensuring target directory exists: %w", err)
+ return fmt.Errorf("ensuring target directory exists: %w", err)
}
// Copy each source in turn.
@@ -427,10 +427,10 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
}()
wg.Wait()
if getErr != nil {
- getErr = fmt.Errorf("error reading %q: %w", src, getErr)
+ getErr = fmt.Errorf("reading %q: %w", src, getErr)
}
if putErr != nil {
- putErr = fmt.Errorf("error storing %q: %w", src, putErr)
+ putErr = fmt.Errorf("storing %q: %w", src, putErr)
}
multiErr = multierror.Append(getErr, putErr)
if multiErr != nil && multiErr.ErrorOrNil() != nil {
@@ -459,7 +459,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
for _, glob := range localSourceStat.Globbed {
rel, err := filepath.Rel(contextDir, glob)
if err != nil {
- return fmt.Errorf("error computing path of %q relative to %q: %w", glob, contextDir, err)
+ return fmt.Errorf("computing path of %q relative to %q: %w", glob, contextDir, err)
}
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return fmt.Errorf("possible escaping context directory error: %q is outside of %q", glob, contextDir)
@@ -468,7 +468,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
if rel != "." {
excluded, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck
if err != nil {
- return fmt.Errorf("error checking if %q(%q) is excluded: %w", glob, rel, err)
+ return fmt.Errorf("checking if %q(%q) is excluded: %w", glob, rel, err)
}
if excluded {
// non-directories that are excluded are excluded, no question, but
@@ -562,16 +562,16 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption
}()
wg.Wait()
if getErr != nil {
- getErr = fmt.Errorf("error reading %q: %w", src, getErr)
+ getErr = fmt.Errorf("reading %q: %w", src, getErr)
}
if closeErr != nil {
- closeErr = fmt.Errorf("error closing %q: %w", src, closeErr)
+ closeErr = fmt.Errorf("closing %q: %w", src, closeErr)
}
if renameErr != nil {
- renameErr = fmt.Errorf("error renaming %q: %w", src, renameErr)
+ renameErr = fmt.Errorf("renaming %q: %w", src, renameErr)
}
if putErr != nil {
- putErr = fmt.Errorf("error storing %q: %w", src, putErr)
+ putErr = fmt.Errorf("storing %q: %w", src, putErr)
}
multiErr = multierror.Append(getErr, closeErr, renameErr, putErr)
if multiErr != nil && multiErr.ErrorOrNil() != nil {
diff --git a/vendor/github.com/containers/buildah/bind/mount.go b/vendor/github.com/containers/buildah/bind/mount.go
index 212be3ca8..213b1f64d 100644
--- a/vendor/github.com/containers/buildah/bind/mount.go
+++ b/vendor/github.com/containers/buildah/bind/mount.go
@@ -35,22 +35,22 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou
// Create a new mount namespace in which to do the things we're doing.
if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
- return nil, fmt.Errorf("error creating new mount namespace for %v: %w", spec.Process.Args, err)
+ return nil, fmt.Errorf("creating new mount namespace for %v: %w", spec.Process.Args, err)
}
// Make all of our mounts private to our namespace.
if err := mount.MakeRPrivate("/"); err != nil {
- return nil, fmt.Errorf("error making mounts private to mount namespace for %v: %w", spec.Process.Args, err)
+ return nil, fmt.Errorf("making mounts private to mount namespace for %v: %w", spec.Process.Args, err)
}
// Make sure the bundle directory is searchable. We created it with
// TempDir(), so it should have started with permissions set to 0700.
info, err := os.Stat(bundlePath)
if err != nil {
- return nil, fmt.Errorf("error checking permissions on %q: %w", bundlePath, err)
+ return nil, fmt.Errorf("checking permissions on %q: %w", bundlePath, err)
}
if err = os.Chmod(bundlePath, info.Mode()|0111); err != nil {
- return nil, fmt.Errorf("error loosening permissions on %q: %w", bundlePath, err)
+ return nil, fmt.Errorf("loosening permissions on %q: %w", bundlePath, err)
}
// Figure out who needs to be able to reach these bind mounts in order
@@ -117,23 +117,23 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou
// access.
mnt := filepath.Join(bundlePath, "mnt")
if err = idtools.MkdirAndChown(mnt, 0100, idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}); err != nil {
- return unmountAll, fmt.Errorf("error creating %q owned by the container's root user: %w", mnt, err)
+ return unmountAll, fmt.Errorf("creating %q owned by the container's root user: %w", mnt, err)
}
// Make that directory private, and add it to the list of locations we
// unmount at cleanup time.
if err = mount.MakeRPrivate(mnt); err != nil {
- return unmountAll, fmt.Errorf("error marking filesystem at %q as private: %w", mnt, err)
+ return unmountAll, fmt.Errorf("marking filesystem at %q as private: %w", mnt, err)
}
unmount = append([]string{mnt}, unmount...)
// Create a bind mount for the root filesystem and add it to the list.
rootfs := filepath.Join(mnt, "rootfs")
if err = os.Mkdir(rootfs, 0000); err != nil {
- return unmountAll, fmt.Errorf("error creating directory %q: %w", rootfs, err)
+ return unmountAll, fmt.Errorf("creating directory %q: %w", rootfs, err)
}
if err = unix.Mount(rootPath, rootfs, "", unix.MS_BIND|unix.MS_REC|unix.MS_PRIVATE, ""); err != nil {
- return unmountAll, fmt.Errorf("error bind mounting root filesystem from %q to %q: %w", rootPath, rootfs, err)
+ return unmountAll, fmt.Errorf("bind mounting root filesystem from %q to %q: %w", rootPath, rootfs, err)
}
logrus.Debugf("bind mounted %q to %q", rootPath, rootfs)
unmount = append([]string{rootfs}, unmount...)
@@ -154,28 +154,28 @@ func SetupIntermediateMountNamespace(spec *specs.Spec, bundlePath string) (unmou
logrus.Warnf("couldn't find %q on host to bind mount into container", spec.Mounts[i].Source)
continue
}
- return unmountAll, fmt.Errorf("error checking if %q is a directory: %w", spec.Mounts[i].Source, err)
+ return unmountAll, fmt.Errorf("checking if %q is a directory: %w", spec.Mounts[i].Source, err)
}
stage := filepath.Join(mnt, fmt.Sprintf("buildah-bind-target-%d", i))
if info.IsDir() {
// If the source is a directory, make one to use as the
// mount target.
if err = os.Mkdir(stage, 0000); err != nil {
- return unmountAll, fmt.Errorf("error creating directory %q: %w", stage, err)
+ return unmountAll, fmt.Errorf("creating directory %q: %w", stage, err)
}
} else {
// If the source is not a directory, create an empty
// file to use as the mount target.
file, err := os.OpenFile(stage, os.O_WRONLY|os.O_CREATE, 0000)
if err != nil {
- return unmountAll, fmt.Errorf("error creating file %q: %w", stage, err)
+ return unmountAll, fmt.Errorf("creating file %q: %w", stage, err)
}
file.Close()
}
// Bind mount the source from wherever it is to a place where
// we know the runtime helper will be able to get to it...
if err = unix.Mount(spec.Mounts[i].Source, stage, "", unix.MS_BIND|unix.MS_REC|unix.MS_PRIVATE, ""); err != nil {
- return unmountAll, fmt.Errorf("error bind mounting bind object from %q to %q: %w", spec.Mounts[i].Source, stage, err)
+ return unmountAll, fmt.Errorf("bind mounting bind object from %q to %q: %w", spec.Mounts[i].Source, stage, err)
}
logrus.Debugf("bind mounted %q to %q", spec.Mounts[i].Source, stage)
spec.Mounts[i].Source = stage
@@ -209,7 +209,7 @@ func leaveBindMountAlone(mount specs.Mount) bool {
func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error {
mounts, err := mount.GetMounts()
if err != nil {
- return fmt.Errorf("error retrieving list of mounts: %w", err)
+ return fmt.Errorf("retrieving list of mounts: %w", err)
}
// getChildren returns the list of mount IDs that hang off of the
// specified ID.
@@ -273,7 +273,7 @@ func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error {
logrus.Debugf("mountpoint %q is not present(?), skipping", mount.Mountpoint)
continue
}
- return fmt.Errorf("error checking if %q is mounted: %w", mount.Mountpoint, err)
+ return fmt.Errorf("checking if %q is mounted: %w", mount.Mountpoint, err)
}
if uint64(mount.Major) != uint64(st.Dev) || uint64(mount.Minor) != uint64(st.Dev) { //nolint:unconvert // (required for some OS/arch combinations)
logrus.Debugf("%q is apparently not really mounted, skipping", mount.Mountpoint)
@@ -296,7 +296,7 @@ func UnmountMountpoints(mountpoint string, mountpointsToRemove []string) error {
// if we're also supposed to remove this thing, do that, too
if cutil.StringInSlice(mount.Mountpoint, mountpointsToRemove) {
if err := os.Remove(mount.Mountpoint); err != nil {
- return fmt.Errorf("error removing %q: %w", mount.Mountpoint, err)
+ return fmt.Errorf("removing %q: %w", mount.Mountpoint, err)
}
}
}
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 41f1ba311..3802a727f 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -408,7 +408,7 @@ func OpenBuilder(store storage.Store, container string) (*Builder, error) {
}
b := &Builder{}
if err = json.Unmarshal(buildstate, &b); err != nil {
- return nil, fmt.Errorf("error parsing %q, read from %q: %w", string(buildstate), filepath.Join(cdir, stateFile), err)
+ return nil, fmt.Errorf("parsing %q, read from %q: %w", string(buildstate), filepath.Join(cdir, stateFile), err)
}
if b.Type != containerType {
return nil, fmt.Errorf("container %q is not a %s container (is a %q container)", container, define.Package, b.Type)
@@ -484,7 +484,7 @@ func OpenAllBuilders(store storage.Store) (builders []*Builder, err error) {
buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile))
if err != nil {
if errors.Is(err, os.ErrNotExist) {
- logrus.Debugf("error reading %q: %v, ignoring container %q", filepath.Join(cdir, stateFile), err, container.ID)
+ logrus.Debugf("%v, ignoring container %q", err, container.ID)
continue
}
return nil, err
@@ -520,7 +520,7 @@ func (b *Builder) Save() error {
return err
}
if err = ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0600); err != nil {
- return fmt.Errorf("error saving builder state to %q: %w", filepath.Join(cdir, stateFile), err)
+ return fmt.Errorf("saving builder state to %q: %w", filepath.Join(cdir, stateFile), err)
}
return nil
}
diff --git a/vendor/github.com/containers/buildah/chroot/pty_ptmx.go b/vendor/github.com/containers/buildah/chroot/pty_ptmx.go
index e613c7571..b1ba96bc9 100644
--- a/vendor/github.com/containers/buildah/chroot/pty_ptmx.go
+++ b/vendor/github.com/containers/buildah/chroot/pty_ptmx.go
@@ -18,28 +18,28 @@ func getPtyDescriptors() (int, int, error) {
// Create a pseudo-terminal -- open a copy of the master side.
controlFd, err := unix.Open("/dev/ptmx", os.O_RDWR, 0600)
if err != nil {
- return -1, -1, fmt.Errorf("error opening PTY master using /dev/ptmx: %v", err)
+ return -1, -1, fmt.Errorf("opening PTY master using /dev/ptmx: %v", err)
}
// Set the kernel's lock to "unlocked".
locked := 0
if result, _, err := unix.Syscall(unix.SYS_IOCTL, uintptr(controlFd), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&locked))); int(result) == -1 {
- return -1, -1, fmt.Errorf("error unlocking PTY descriptor: %v", err)
+ return -1, -1, fmt.Errorf("unlocking PTY descriptor: %v", err)
}
// Get a handle for the other end.
ptyFd, _, err := unix.Syscall(unix.SYS_IOCTL, uintptr(controlFd), unix.TIOCGPTPEER, unix.O_RDWR|unix.O_NOCTTY)
if int(ptyFd) == -1 {
if errno, isErrno := err.(syscall.Errno); !isErrno || (errno != syscall.EINVAL && errno != syscall.ENOTTY) {
- return -1, -1, fmt.Errorf("error getting PTY descriptor: %v", err)
+ return -1, -1, fmt.Errorf("getting PTY descriptor: %v", err)
}
// EINVAL means the kernel's too old to understand TIOCGPTPEER. Try TIOCGPTN.
ptyN, err := unix.IoctlGetInt(controlFd, unix.TIOCGPTN)
if err != nil {
- return -1, -1, fmt.Errorf("error getting PTY number: %v", err)
+ return -1, -1, fmt.Errorf("getting PTY number: %v", err)
}
ptyName := fmt.Sprintf("/dev/pts/%d", ptyN)
fd, err := unix.Open(ptyName, unix.O_RDWR|unix.O_NOCTTY, 0620)
if err != nil {
- return -1, -1, fmt.Errorf("error opening PTY %q: %v", ptyName, err)
+ return -1, -1, fmt.Errorf("opening PTY %q: %v", ptyName, err)
}
ptyFd = uintptr(fd)
}
diff --git a/vendor/github.com/containers/buildah/chroot/run_common.go b/vendor/github.com/containers/buildah/chroot/run_common.go
index 34952e59f..040b68286 100644
--- a/vendor/github.com/containers/buildah/chroot/run_common.go
+++ b/vendor/github.com/containers/buildah/chroot/run_common.go
@@ -74,7 +74,7 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade
return err
}
if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil {
- return fmt.Errorf("error storing runtime configuration: %w", err)
+ return fmt.Errorf("storing runtime configuration: %w", err)
}
logrus.Debugf("config = %v", string(specbytes))
@@ -92,14 +92,14 @@ func RunUsingChroot(spec *specs.Spec, bundlePath, homeDir string, stdin io.Reade
// Create a pipe for passing configuration down to the next process.
preader, pwriter, err := os.Pipe()
if err != nil {
- return fmt.Errorf("error creating configuration pipe: %w", err)
+ return fmt.Errorf("creating configuration pipe: %w", err)
}
config, conferr := json.Marshal(runUsingChrootSubprocOptions{
Spec: spec,
BundlePath: bundlePath,
})
if conferr != nil {
- return fmt.Errorf("error encoding configuration for %q: %w", runUsingChrootCommand, conferr)
+ return fmt.Errorf("encoding configuration for %q: %w", runUsingChrootCommand, conferr)
}
// Set our terminal's mode to raw, to pass handling of special
@@ -488,7 +488,7 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io
// Create a pipe for passing configuration down to the next process.
preader, pwriter, err := os.Pipe()
if err != nil {
- return 1, fmt.Errorf("error creating configuration pipe: %w", err)
+ return 1, fmt.Errorf("creating configuration pipe: %w", err)
}
config, conferr := json.Marshal(runUsingChrootExecSubprocOptions{
Spec: spec,
@@ -514,7 +514,7 @@ func runUsingChroot(spec *specs.Spec, bundlePath string, ctty *os.File, stdin io
}
cmd.ExtraFiles = append([]*os.File{preader}, cmd.ExtraFiles...)
if err := setPlatformUnshareOptions(spec, cmd); err != nil {
- return 1, fmt.Errorf("error setting platform unshare options: %w", err)
+ return 1, fmt.Errorf("setting platform unshare options: %w", err)
}
interrupted := make(chan os.Signal, 100)
@@ -778,7 +778,7 @@ func parseRlimits(spec *specs.Spec) (map[int]unix.Rlimit, error) {
for _, limit := range spec.Process.Rlimits {
resource, recognized := rlimitsMap[strings.ToUpper(limit.Type)]
if !recognized {
- return nil, fmt.Errorf("error parsing limit type %q", limit.Type)
+ return nil, fmt.Errorf("parsing limit type %q", limit.Type)
}
parsed[resource] = makeRlimit(limit)
}
@@ -795,7 +795,7 @@ func setRlimits(spec *specs.Spec, onlyLower, onlyRaise bool) error {
for resource, desired := range limits {
var current unix.Rlimit
if err := unix.Getrlimit(resource, &current); err != nil {
- return fmt.Errorf("error reading %q limit: %w", rlimitsReverseMap[resource], err)
+ return fmt.Errorf("reading %q limit: %w", rlimitsReverseMap[resource], err)
}
if desired.Max > current.Max && onlyLower {
// this would raise a hard limit, and we're only here to lower them
@@ -806,7 +806,7 @@ func setRlimits(spec *specs.Spec, onlyLower, onlyRaise bool) error {
continue
}
if err := unix.Setrlimit(resource, &desired); err != nil {
- return fmt.Errorf("error setting %q limit to soft=%d,hard=%d (was soft=%d,hard=%d): %w", rlimitsReverseMap[resource], desired.Cur, desired.Max, current.Cur, current.Max, err)
+ return fmt.Errorf("setting %q limit to soft=%d,hard=%d (was soft=%d,hard=%d): %w", rlimitsReverseMap[resource], desired.Cur, desired.Max, current.Cur, current.Max, err)
}
}
return nil
diff --git a/vendor/github.com/containers/buildah/chroot/run_freebsd.go b/vendor/github.com/containers/buildah/chroot/run_freebsd.go
index 239322f56..52763ee97 100644
--- a/vendor/github.com/containers/buildah/chroot/run_freebsd.go
+++ b/vendor/github.com/containers/buildah/chroot/run_freebsd.go
@@ -82,7 +82,7 @@ func createPlatformContainer(options runUsingChrootExecSubprocOptions) error {
jconf.Set("enforce_statfs", 1)
_, err := jail.CreateAndAttach(jconf)
if err != nil {
- return fmt.Errorf("error creating jail: %w", err)
+ return fmt.Errorf("creating jail: %w", err)
}
return nil
}
@@ -97,7 +97,7 @@ func makeReadOnly(mntpoint string, flags uintptr) error {
var fs unix.Statfs_t
// Make sure it's read-only.
if err := unix.Statfs(mntpoint, &fs); err != nil {
- return fmt.Errorf("error checking if directory %q was bound read-only: %w", mntpoint, err)
+ return fmt.Errorf("checking if directory %q was bound read-only: %w", mntpoint, err)
}
return nil
}
@@ -174,14 +174,14 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
case "nullfs":
srcinfo, err = os.Stat(m.Source)
if err != nil {
- return undoBinds, fmt.Errorf("error examining %q for mounting in mount namespace: %w", m.Source, err)
+ return undoBinds, fmt.Errorf("examining %q for mounting in mount namespace: %w", m.Source, err)
}
}
target := filepath.Join(spec.Root.Path, m.Destination)
if _, err := os.Stat(target); err != nil {
// If the target can't be stat()ted, check the error.
if !os.IsNotExist(err) {
- return undoBinds, fmt.Errorf("error examining %q for mounting in mount namespace: %w", target, err)
+ return undoBinds, fmt.Errorf("examining %q for mounting in mount namespace: %w", target, err)
}
// The target isn't there yet, so create it, and make a
// note to remove it later.
@@ -189,12 +189,12 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
// Leaving it here since I plan to add this to FreeBSD's nullfs.
if m.Type != "nullfs" || srcinfo.IsDir() {
if err = os.MkdirAll(target, 0111); err != nil {
- return undoBinds, fmt.Errorf("error creating mountpoint %q in mount namespace: %w", target, err)
+ return undoBinds, fmt.Errorf("creating mountpoint %q in mount namespace: %w", target, err)
}
removes = append(removes, target)
} else {
if err = os.MkdirAll(filepath.Dir(target), 0111); err != nil {
- return undoBinds, fmt.Errorf("error ensuring parent of mountpoint %q (%q) is present in mount namespace: %w", target, filepath.Dir(target), err)
+ return undoBinds, fmt.Errorf("ensuring parent of mountpoint %q (%q) is present in mount namespace: %w", target, filepath.Dir(target), err)
}
// Don't do this until we can support file mounts in nullfs
/*var file *os.File
@@ -219,7 +219,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
err = os.MkdirAll(save, 0111)
}
if err != nil {
- return undoBinds, fmt.Errorf("error creating file mount save directory %q: %w", save, err)
+ return undoBinds, fmt.Errorf("creating file mount save directory %q: %w", save, err)
}
removes = append(removes, save)
}
@@ -227,7 +227,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
if _, err := os.Stat(target); err == nil {
logrus.Debugf("moving %q to %q", target, savePath)
if err := os.Rename(target, savePath); err != nil {
- return undoBinds, fmt.Errorf("error moving %q to %q: %w", target, savePath, err)
+ return undoBinds, fmt.Errorf("moving %q to %q: %w", target, savePath, err)
}
renames = append(renames, rename{
from: target,
@@ -238,12 +238,12 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
removes = append(removes, target)
}
if err := copyFile(m.Source, target); err != nil {
- return undoBinds, fmt.Errorf("error copying %q to %q: %w", m.Source, target, err)
+ return undoBinds, fmt.Errorf("copying %q to %q: %w", m.Source, target, err)
}
} else {
logrus.Debugf("bind mounting %q on %q", m.Destination, filepath.Join(spec.Root.Path, m.Destination))
if err := mount.Mount(m.Source, target, "nullfs", strings.Join(m.Options, ",")); err != nil {
- return undoBinds, fmt.Errorf("error bind mounting %q from host to %q in mount namespace (%q): %w", m.Source, m.Destination, target, err)
+ return undoBinds, fmt.Errorf("bind mounting %q from host to %q in mount namespace (%q): %w", m.Source, m.Destination, target, err)
}
logrus.Debugf("bind mounted %q to %q", m.Source, target)
unmounts = append(unmounts, target)
@@ -251,7 +251,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
case "devfs", "fdescfs", "tmpfs":
// Mount /dev, /dev/fd.
if err := mount.Mount(m.Source, target, m.Type, strings.Join(m.Options, ",")); err != nil {
- return undoBinds, fmt.Errorf("error mounting %q to %q in mount namespace (%q, %q): %w", m.Type, m.Destination, target, strings.Join(m.Options, ","), err)
+ return undoBinds, fmt.Errorf("mounting %q to %q in mount namespace (%q, %q): %w", m.Type, m.Destination, target, strings.Join(m.Options, ","), err)
}
logrus.Debugf("mounted a %q to %q", m.Type, target)
unmounts = append(unmounts, target)
diff --git a/vendor/github.com/containers/buildah/chroot/run_linux.go b/vendor/github.com/containers/buildah/chroot/run_linux.go
index 7e1dcca14..f2a9c9034 100644
--- a/vendor/github.com/containers/buildah/chroot/run_linux.go
+++ b/vendor/github.com/containers/buildah/chroot/run_linux.go
@@ -158,7 +158,7 @@ func setApparmorProfile(spec *specs.Spec) error {
return nil
}
if err := apparmor.ApplyProfile(spec.Process.ApparmorProfile); err != nil {
- return fmt.Errorf("error setting apparmor profile to %q: %w", spec.Process.ApparmorProfile, err)
+ return fmt.Errorf("setting apparmor profile to %q: %w", spec.Process.ApparmorProfile, err)
}
return nil
}
@@ -167,14 +167,14 @@ func setApparmorProfile(spec *specs.Spec) error {
func setCapabilities(spec *specs.Spec, keepCaps ...string) error {
currentCaps, err := capability.NewPid2(0)
if err != nil {
- return fmt.Errorf("error reading capabilities of current process: %w", err)
+ return fmt.Errorf("reading capabilities of current process: %w", err)
}
if err := currentCaps.Load(); err != nil {
- return fmt.Errorf("error loading capabilities: %w", err)
+ return fmt.Errorf("loading capabilities: %w", err)
}
caps, err := capability.NewPid2(0)
if err != nil {
- return fmt.Errorf("error reading capabilities of current process: %w", err)
+ return fmt.Errorf("reading capabilities of current process: %w", err)
}
capMap := map[capability.CapType][]string{
capability.BOUNDING: spec.Process.Capabilities.Bounding,
@@ -195,7 +195,7 @@ func setCapabilities(spec *specs.Spec, keepCaps ...string) error {
}
}
if cap == noCap {
- return fmt.Errorf("error mapping capability %q to a number", capToSet)
+ return fmt.Errorf("mapping capability %q to a number", capToSet)
}
caps.Set(capType, cap)
}
@@ -208,7 +208,7 @@ func setCapabilities(spec *specs.Spec, keepCaps ...string) error {
}
}
if cap == noCap {
- return fmt.Errorf("error mapping capability %q to a number", capToSet)
+ return fmt.Errorf("mapping capability %q to a number", capToSet)
}
if currentCaps.Get(capType, cap) {
caps.Set(capType, cap)
@@ -216,7 +216,7 @@ func setCapabilities(spec *specs.Spec, keepCaps ...string) error {
}
}
if err = caps.Apply(capability.CAPS | capability.BOUNDS | capability.AMBS); err != nil {
- return fmt.Errorf("error setting capabilities: %w", err)
+ return fmt.Errorf("setting capabilities: %w", err)
}
return nil
}
@@ -233,11 +233,11 @@ func makeReadOnly(mntpoint string, flags uintptr) error {
var fs unix.Statfs_t
// Make sure it's read-only.
if err := unix.Statfs(mntpoint, &fs); err != nil {
- return fmt.Errorf("error checking if directory %q was bound read-only: %w", mntpoint, err)
+ return fmt.Errorf("checking if directory %q was bound read-only: %w", mntpoint, err)
}
if fs.Flags&unix.ST_RDONLY == 0 {
if err := unix.Mount(mntpoint, mntpoint, "bind", flags|unix.MS_REMOUNT, ""); err != nil {
- return fmt.Errorf("error remounting %s in mount namespace read-only: %w", mntpoint, err)
+ return fmt.Errorf("remounting %s in mount namespace read-only: %w", mntpoint, err)
}
}
return nil
@@ -283,16 +283,16 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
}
}
if err != nil {
- return undoBinds, fmt.Errorf("error bind mounting /dev from host into mount namespace: %w", err)
+ return undoBinds, fmt.Errorf("bind mounting /dev from host into mount namespace: %w", err)
}
}
// Make sure it's read-only.
if err = unix.Statfs(subDev, &fs); err != nil {
- return undoBinds, fmt.Errorf("error checking if directory %q was bound read-only: %w", subDev, err)
+ return undoBinds, fmt.Errorf("checking if directory %q was bound read-only: %w", subDev, err)
}
if fs.Flags&unix.ST_RDONLY == 0 {
if err := unix.Mount(subDev, subDev, "bind", devFlags|unix.MS_REMOUNT, ""); err != nil {
- return undoBinds, fmt.Errorf("error remounting /dev in mount namespace read-only: %w", err)
+ return undoBinds, fmt.Errorf("remounting /dev in mount namespace read-only: %w", err)
}
}
logrus.Debugf("bind mounted %q to %q", "/dev", filepath.Join(spec.Root.Path, "/dev"))
@@ -307,7 +307,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
}
}
if err != nil {
- return undoBinds, fmt.Errorf("error bind mounting /proc from host into mount namespace: %w", err)
+ return undoBinds, fmt.Errorf("bind mounting /proc from host into mount namespace: %w", err)
}
}
logrus.Debugf("bind mounted %q to %q", "/proc", filepath.Join(spec.Root.Path, "/proc"))
@@ -322,7 +322,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
}
}
if err != nil {
- return undoBinds, fmt.Errorf("error bind mounting /sys from host into mount namespace: %w", err)
+ return undoBinds, fmt.Errorf("bind mounting /sys from host into mount namespace: %w", err)
}
}
if err := makeReadOnly(subSys, sysFlags); err != nil {
@@ -380,14 +380,14 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
case "bind":
srcinfo, err = os.Stat(m.Source)
if err != nil {
- return undoBinds, fmt.Errorf("error examining %q for mounting in mount namespace: %w", m.Source, err)
+ return undoBinds, fmt.Errorf("examining %q for mounting in mount namespace: %w", m.Source, err)
}
case "overlay":
fallthrough
case "tmpfs":
srcinfo, err = os.Stat("/")
if err != nil {
- return undoBinds, fmt.Errorf("error examining / to use as a template for a %s: %w", m.Type, err)
+ return undoBinds, fmt.Errorf("examining / to use as a template for a %s: %w", m.Type, err)
}
}
target := filepath.Join(spec.Root.Path, m.Destination)
@@ -405,20 +405,20 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
if err != nil {
// If the target can't be stat()ted, check the error.
if !errors.Is(err, os.ErrNotExist) {
- return undoBinds, fmt.Errorf("error examining %q for mounting in mount namespace: %w", target, err)
+ return undoBinds, fmt.Errorf("examining %q for mounting in mount namespace: %w", target, err)
}
// The target isn't there yet, so create it.
if srcinfo.IsDir() {
if err = os.MkdirAll(target, 0755); err != nil {
- return undoBinds, fmt.Errorf("error creating mountpoint %q in mount namespace: %w", target, err)
+ return undoBinds, fmt.Errorf("creating mountpoint %q in mount namespace: %w", target, err)
}
} else {
if err = os.MkdirAll(filepath.Dir(target), 0755); err != nil {
- return undoBinds, fmt.Errorf("error ensuring parent of mountpoint %q (%q) is present in mount namespace: %w", target, filepath.Dir(target), err)
+ return undoBinds, fmt.Errorf("ensuring parent of mountpoint %q (%q) is present in mount namespace: %w", target, filepath.Dir(target), err)
}
var file *os.File
if file, err = os.OpenFile(target, os.O_WRONLY|os.O_CREATE, 0755); err != nil {
- return undoBinds, fmt.Errorf("error creating mountpoint %q in mount namespace: %w", target, err)
+ return undoBinds, fmt.Errorf("creating mountpoint %q in mount namespace: %w", target, err)
}
file.Close()
}
@@ -458,28 +458,28 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
// Do the bind mount.
logrus.Debugf("bind mounting %q on %q", m.Destination, filepath.Join(spec.Root.Path, m.Destination))
if err := unix.Mount(m.Source, target, "", requestFlags, ""); err != nil {
- return undoBinds, fmt.Errorf("error bind mounting %q from host to %q in mount namespace (%q): %w", m.Source, m.Destination, target, err)
+ return undoBinds, fmt.Errorf("bind mounting %q from host to %q in mount namespace (%q): %w", m.Source, m.Destination, target, err)
}
logrus.Debugf("bind mounted %q to %q", m.Source, target)
case "tmpfs":
// Mount a tmpfs.
if err := mount.Mount(m.Source, target, m.Type, strings.Join(append(m.Options, "private"), ",")); err != nil {
- return undoBinds, fmt.Errorf("error mounting tmpfs to %q in mount namespace (%q, %q): %w", m.Destination, target, strings.Join(m.Options, ","), err)
+ return undoBinds, fmt.Errorf("mounting tmpfs to %q in mount namespace (%q, %q): %w", m.Destination, target, strings.Join(m.Options, ","), err)
}
logrus.Debugf("mounted a tmpfs to %q", target)
case "overlay":
// Mount an overlay.
if err := mount.Mount(m.Source, target, m.Type, strings.Join(append(m.Options, "private"), ",")); err != nil {
- return undoBinds, fmt.Errorf("error mounting overlay to %q in mount namespace (%q, %q): %w", m.Destination, target, strings.Join(m.Options, ","), err)
+ return undoBinds, fmt.Errorf("mounting overlay to %q in mount namespace (%q, %q): %w", m.Destination, target, strings.Join(m.Options, ","), err)
}
logrus.Debugf("mounted an overlay to %q", target)
}
if err = unix.Statfs(target, &fs); err != nil {
- return undoBinds, fmt.Errorf("error checking if directory %q was bound read-only: %w", target, err)
+ return undoBinds, fmt.Errorf("checking if directory %q was bound read-only: %w", target, err)
}
if uintptr(fs.Flags)&expectedFlags != expectedFlags {
if err := unix.Mount(target, target, "bind", requestFlags|unix.MS_REMOUNT, ""); err != nil {
- return undoBinds, fmt.Errorf("error remounting %q in mount namespace with expected flags: %w", target, err)
+ return undoBinds, fmt.Errorf("remounting %q in mount namespace with expected flags: %w", target, err)
}
}
}
@@ -494,7 +494,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
// No target, no problem.
continue
}
- return undoBinds, fmt.Errorf("error checking %q for symlinks before marking it read-only: %w", r, err)
+ return undoBinds, fmt.Errorf("checking %q for symlinks before marking it read-only: %w", r, err)
}
// Check if the location is already read-only.
var fs unix.Statfs_t
@@ -503,7 +503,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
// No target, no problem.
continue
}
- return undoBinds, fmt.Errorf("error checking if directory %q is already read-only: %w", target, err)
+ return undoBinds, fmt.Errorf("checking if directory %q is already read-only: %w", target, err)
}
if fs.Flags&unix.ST_RDONLY != 0 {
continue
@@ -515,23 +515,23 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
// No target, no problem.
continue
}
- return undoBinds, fmt.Errorf("error bind mounting %q onto itself in preparation for making it read-only: %w", target, err)
+ return undoBinds, fmt.Errorf("bind mounting %q onto itself in preparation for making it read-only: %w", target, err)
}
// Remount the location read-only.
if err = unix.Statfs(target, &fs); err != nil {
- return undoBinds, fmt.Errorf("error checking if directory %q was bound read-only: %w", target, err)
+ return undoBinds, fmt.Errorf("checking if directory %q was bound read-only: %w", target, err)
}
if fs.Flags&unix.ST_RDONLY == 0 {
if err := unix.Mount(target, target, "", roFlags|unix.MS_BIND|unix.MS_REMOUNT, ""); err != nil {
- return undoBinds, fmt.Errorf("error remounting %q in mount namespace read-only: %w", target, err)
+ return undoBinds, fmt.Errorf("remounting %q in mount namespace read-only: %w", target, err)
}
}
// Check again.
if err = unix.Statfs(target, &fs); err != nil {
- return undoBinds, fmt.Errorf("error checking if directory %q was remounted read-only: %w", target, err)
+ return undoBinds, fmt.Errorf("checking if directory %q was remounted read-only: %w", target, err)
}
if fs.Flags&unix.ST_RDONLY == 0 {
- return undoBinds, fmt.Errorf("error verifying that %q in mount namespace was remounted read-only: %w", target, err)
+ return undoBinds, fmt.Errorf("verifying that %q in mount namespace was remounted read-only: %w", target, err)
}
}
@@ -539,7 +539,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
roEmptyDir := filepath.Join(bundlePath, "empty")
if len(spec.Linux.MaskedPaths) > 0 {
if err := os.Mkdir(roEmptyDir, 0700); err != nil {
- return undoBinds, fmt.Errorf("error creating empty directory %q: %w", roEmptyDir, err)
+ return undoBinds, fmt.Errorf("creating empty directory %q: %w", roEmptyDir, err)
}
}
@@ -560,19 +560,19 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
// No target, no problem.
continue
}
- return undoBinds, fmt.Errorf("error examining %q for masking in mount namespace: %w", target, err)
+ return undoBinds, fmt.Errorf("examining %q for masking in mount namespace: %w", target, err)
}
if targetinfo.IsDir() {
// The target's a directory. Check if it's a read-only filesystem.
var statfs unix.Statfs_t
if err = unix.Statfs(target, &statfs); err != nil {
- return undoBinds, fmt.Errorf("error checking if directory %q is a mountpoint: %w", target, err)
+ return undoBinds, fmt.Errorf("checking if directory %q is a mountpoint: %w", target, err)
}
isReadOnly := statfs.Flags&unix.MS_RDONLY != 0
// Check if any of the IDs we're mapping could read it.
var stat unix.Stat_t
if err = unix.Stat(target, &stat); err != nil {
- return undoBinds, fmt.Errorf("error checking permissions on directory %q: %w", target, err)
+ return undoBinds, fmt.Errorf("checking permissions on directory %q: %w", target, err)
}
isAccessible := false
if stat.Mode&(unix.S_IROTH|unix.S_IXOTH) != 0 {
@@ -603,13 +603,13 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
directory, err := os.Open(target)
if err != nil {
if !os.IsPermission(err) {
- return undoBinds, fmt.Errorf("error opening directory %q: %w", target, err)
+ return undoBinds, fmt.Errorf("opening directory %q: %w", target, err)
}
} else {
names, err := directory.Readdirnames(0)
directory.Close()
if err != nil {
- return undoBinds, fmt.Errorf("error reading contents of directory %q: %w", target, err)
+ return undoBinds, fmt.Errorf("reading contents of directory %q: %w", target, err)
}
hasContent = false
for _, name := range names {
@@ -628,14 +628,14 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
roFlags := uintptr(syscall.MS_BIND | syscall.MS_NOSUID | syscall.MS_NODEV | syscall.MS_NOEXEC | syscall.MS_RDONLY)
if !isReadOnly || (hasContent && isAccessible) {
if err = unix.Mount(roEmptyDir, target, "bind", roFlags, ""); err != nil {
- return undoBinds, fmt.Errorf("error masking directory %q in mount namespace: %w", target, err)
+ return undoBinds, fmt.Errorf("masking directory %q in mount namespace: %w", target, err)
}
if err = unix.Statfs(target, &fs); err != nil {
- return undoBinds, fmt.Errorf("error checking if directory %q was mounted read-only in mount namespace: %w", target, err)
+ return undoBinds, fmt.Errorf("checking if directory %q was mounted read-only in mount namespace: %w", target, err)
}
if fs.Flags&unix.ST_RDONLY == 0 {
if err = unix.Mount(target, target, "", roFlags|syscall.MS_REMOUNT, ""); err != nil {
- return undoBinds, fmt.Errorf("error making sure directory %q in mount namespace is read only: %w", target, err)
+ return undoBinds, fmt.Errorf("making sure directory %q in mount namespace is read only: %w", target, err)
}
}
}
@@ -643,7 +643,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
// If the target is not a directory or os.DevNull, bind mount os.DevNull over it.
if !isDevNull(targetinfo) {
if err = unix.Mount(os.DevNull, target, "", uintptr(syscall.MS_BIND|syscall.MS_RDONLY|syscall.MS_PRIVATE), ""); err != nil {
- return undoBinds, fmt.Errorf("error masking non-directory %q in mount namespace: %w", target, err)
+ return undoBinds, fmt.Errorf("masking non-directory %q in mount namespace: %w", target, err)
}
}
}
diff --git a/vendor/github.com/containers/buildah/chroot/seccomp.go b/vendor/github.com/containers/buildah/chroot/seccomp.go
index aebb1a180..714dca628 100644
--- a/vendor/github.com/containers/buildah/chroot/seccomp.go
+++ b/vendor/github.com/containers/buildah/chroot/seccomp.go
@@ -111,11 +111,11 @@ func setSeccomp(spec *specs.Spec) error {
filter, err := libseccomp.NewFilter(mapAction(spec.Linux.Seccomp.DefaultAction, spec.Linux.Seccomp.DefaultErrnoRet))
if err != nil {
- return fmt.Errorf("error creating seccomp filter with default action %q: %w", spec.Linux.Seccomp.DefaultAction, err)
+ return fmt.Errorf("creating seccomp filter with default action %q: %w", spec.Linux.Seccomp.DefaultAction, err)
}
for _, arch := range spec.Linux.Seccomp.Architectures {
if err = filter.AddArch(mapArch(arch)); err != nil {
- return fmt.Errorf("error adding architecture %q(%q) to seccomp filter: %w", arch, mapArch(arch), err)
+ return fmt.Errorf("adding architecture %q(%q) to seccomp filter: %w", arch, mapArch(arch), err)
}
}
for _, rule := range spec.Linux.Seccomp.Syscalls {
@@ -131,7 +131,7 @@ func setSeccomp(spec *specs.Spec) error {
for scnum := range scnames {
if len(rule.Args) == 0 {
if err = filter.AddRule(scnum, mapAction(rule.Action, rule.ErrnoRet)); err != nil {
- return fmt.Errorf("error adding a rule (%q:%q) to seccomp filter: %w", scnames[scnum], rule.Action, err)
+ return fmt.Errorf("adding a rule (%q:%q) to seccomp filter: %w", scnames[scnum], rule.Action, err)
}
continue
}
@@ -140,7 +140,7 @@ func setSeccomp(spec *specs.Spec) error {
for _, arg := range rule.Args {
condition, err := libseccomp.MakeCondition(arg.Index, mapOp(arg.Op), arg.Value, arg.ValueTwo)
if err != nil {
- return fmt.Errorf("error building a seccomp condition %d:%v:%d:%d: %w", arg.Index, arg.Op, arg.Value, arg.ValueTwo, err)
+ return fmt.Errorf("building a seccomp condition %d:%v:%d:%d: %w", arg.Index, arg.Op, arg.Value, arg.ValueTwo, err)
}
if arg.Op != specs.OpEqualTo {
opsAreAllEquality = false
@@ -156,22 +156,22 @@ func setSeccomp(spec *specs.Spec) error {
if len(rule.Args) > 1 && opsAreAllEquality && err.Error() == "two checks on same syscall argument" {
for i := range conditions {
if err = filter.AddRuleConditional(scnum, mapAction(rule.Action, rule.ErrnoRet), conditions[i:i+1]); err != nil {
- return fmt.Errorf("error adding a conditional rule (%q:%q[%d]) to seccomp filter: %w", scnames[scnum], rule.Action, i, err)
+ return fmt.Errorf("adding a conditional rule (%q:%q[%d]) to seccomp filter: %w", scnames[scnum], rule.Action, i, err)
}
}
} else {
- return fmt.Errorf("error adding a conditional rule (%q:%q) to seccomp filter: %w", scnames[scnum], rule.Action, err)
+ return fmt.Errorf("adding a conditional rule (%q:%q) to seccomp filter: %w", scnames[scnum], rule.Action, err)
}
}
}
}
if err = filter.SetNoNewPrivsBit(spec.Process.NoNewPrivileges); err != nil {
- return fmt.Errorf("error setting no-new-privileges bit to %v: %w", spec.Process.NoNewPrivileges, err)
+ return fmt.Errorf("setting no-new-privileges bit to %v: %w", spec.Process.NoNewPrivileges, err)
}
err = filter.Load()
filter.Release()
if err != nil {
- return fmt.Errorf("error activating seccomp filter: %w", err)
+ return fmt.Errorf("activating seccomp filter: %w", err)
}
return nil
}
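
For context on the calls whose failures are reported above, here is a minimal sketch, not from this patch, of the libseccomp-golang filter lifecycle that setSeccomp follows: create a filter with a default action, add per-syscall rules, set the no-new-privileges bit, and load it. The "chroot" syscall and the EPERM return code are illustrative assumptions.

package main

import (
	"fmt"

	libseccomp "github.com/seccomp/libseccomp-golang"
)

func main() {
	// Allow everything by default; the single rule below makes one
	// syscall fail with EPERM (errno 1) instead.
	filter, err := libseccomp.NewFilter(libseccomp.ActAllow)
	if err != nil {
		fmt.Println("creating seccomp filter:", err)
		return
	}
	defer filter.Release()

	syscallID, err := libseccomp.GetSyscallFromName("chroot")
	if err != nil {
		fmt.Println("mapping syscall name to a number:", err)
		return
	}
	if err := filter.AddRule(syscallID, libseccomp.ActErrno.SetReturnCode(1)); err != nil {
		fmt.Println("adding a rule to seccomp filter:", err)
		return
	}
	if err := filter.SetNoNewPrivsBit(true); err != nil {
		fmt.Println("setting no-new-privileges bit:", err)
		return
	}
	if err := filter.Load(); err != nil {
		fmt.Println("activating seccomp filter:", err)
	}
}
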
@@ -189,7 +189,7 @@ func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
default:
seccompProfile, err := ioutil.ReadFile(seccompProfilePath)
if err != nil {
- return fmt.Errorf("opening seccomp profile (%s) failed: %w", seccompProfilePath, err)
+ return fmt.Errorf("opening seccomp profile failed: %w", err)
}
seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec)
if err != nil {
diff --git a/vendor/github.com/containers/buildah/chroot/selinux.go b/vendor/github.com/containers/buildah/chroot/selinux.go
index 538c0e3f4..bba4b8254 100644
--- a/vendor/github.com/containers/buildah/chroot/selinux.go
+++ b/vendor/github.com/containers/buildah/chroot/selinux.go
@@ -17,7 +17,7 @@ func setSelinuxLabel(spec *specs.Spec) error {
logrus.Debugf("setting selinux label")
if spec.Process.SelinuxLabel != "" && selinux.GetEnabled() {
if err := label.SetProcessLabel(spec.Process.SelinuxLabel); err != nil {
- return fmt.Errorf("error setting process label to %q: %w", spec.Process.SelinuxLabel, err)
+ return fmt.Errorf("setting process label to %q: %w", spec.Process.SelinuxLabel, err)
}
}
return nil
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index 727f97b06..e53fbfe87 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -146,7 +146,7 @@ func checkRegistrySourcesAllows(forWhat string, dest types.ImageReference) (inse
AllowedRegistries []string `json:"allowedRegistries,omitempty"`
}
if err := json.Unmarshal([]byte(registrySources), &sources); err != nil {
- return false, fmt.Errorf("error parsing $BUILD_REGISTRY_SOURCES (%q) as JSON: %w", registrySources, err)
+ return false, fmt.Errorf("parsing $BUILD_REGISTRY_SOURCES (%q) as JSON: %w", registrySources, err)
}
blocked := false
if len(sources.BlockedRegistries) > 0 {
@@ -205,7 +205,7 @@ func (b *Builder) addManifest(ctx context.Context, manifestName string, imageSpe
names, err := util.ExpandNames([]string{manifestName}, systemContext, b.store)
if err != nil {
- return "", fmt.Errorf("error encountered while expanding manifest list name %q: %w", manifestName, err)
+ return "", fmt.Errorf("expanding manifest list name %q: %w", manifestName, err)
}
ref, err := util.VerifyTagName(imageSpec)
@@ -258,7 +258,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
nameToRemove = stringid.GenerateRandomID() + "-tmp"
dest2, err := is.Transport.ParseStoreReference(b.store, nameToRemove)
if err != nil {
- return imgID, nil, "", fmt.Errorf("error creating temporary destination reference for image: %w", err)
+ return imgID, nil, "", fmt.Errorf("creating temporary destination reference for image: %w", err)
}
dest = dest2
}
@@ -267,7 +267,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
blocked, err := isReferenceBlocked(dest, systemContext)
if err != nil {
- return "", nil, "", fmt.Errorf("error checking if committing to registry for %q is blocked: %w", transports.ImageName(dest), err)
+ return "", nil, "", fmt.Errorf("checking if committing to registry for %q is blocked: %w", transports.ImageName(dest), err)
}
if blocked {
return "", nil, "", fmt.Errorf("commit access to registry for %q is blocked by configuration", transports.ImageName(dest))
@@ -276,14 +276,14 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
// Load the system signing policy.
commitPolicy, err := signature.DefaultPolicy(systemContext)
if err != nil {
- return "", nil, "", fmt.Errorf("error obtaining default signature policy: %w", err)
+ return "", nil, "", fmt.Errorf("obtaining default signature policy: %w", err)
}
// Override the settings for local storage to make sure that we can always read the source "image".
commitPolicy.Transports[is.Transport.Name()] = storageAllowedPolicyScopes
policyContext, err := signature.NewPolicyContext(commitPolicy)
if err != nil {
- return imgID, nil, "", fmt.Errorf("error creating new signature policy context: %w", err)
+ return imgID, nil, "", fmt.Errorf("creating new signature policy context: %w", err)
}
defer func() {
if err2 := policyContext.Destroy(); err2 != nil {
@@ -309,7 +309,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
// Build an image reference from which we can copy the finished image.
src, err = b.makeContainerImageRef(options)
if err != nil {
- return imgID, nil, "", fmt.Errorf("error computing layer digests and building metadata for container %q: %w", b.ContainerID, err)
+ return imgID, nil, "", fmt.Errorf("computing layer digests and building metadata for container %q: %w", b.ContainerID, err)
}
// In case we're using caching, decide how to handle compression for a cache.
// If we're using blob caching, set it up for the source.
@@ -322,12 +322,12 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
}
cache, err := blobcache.NewBlobCache(src, options.BlobDirectory, compress)
if err != nil {
- return imgID, nil, "", fmt.Errorf("error wrapping image reference %q in blob cache at %q: %w", transports.ImageName(src), options.BlobDirectory, err)
+ return imgID, nil, "", fmt.Errorf("wrapping image reference %q in blob cache at %q: %w", transports.ImageName(src), options.BlobDirectory, err)
}
maybeCachedSrc = cache
cache, err = blobcache.NewBlobCache(dest, options.BlobDirectory, compress)
if err != nil {
- return imgID, nil, "", fmt.Errorf("error wrapping image reference %q in blob cache at %q: %w", transports.ImageName(dest), options.BlobDirectory, err)
+ return imgID, nil, "", fmt.Errorf("wrapping image reference %q in blob cache at %q: %w", transports.ImageName(dest), options.BlobDirectory, err)
}
maybeCachedDest = cache
}
@@ -348,7 +348,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
var manifestBytes []byte
if manifestBytes, err = retryCopyImage(ctx, policyContext, maybeCachedDest, maybeCachedSrc, dest, getCopyOptions(b.store, options.ReportWriter, nil, systemContext, "", false, options.SignBy, options.OciEncryptLayers, options.OciEncryptConfig, nil), options.MaxRetries, options.RetryDelay); err != nil {
- return imgID, nil, "", fmt.Errorf("error copying layers and metadata for container %q: %w", b.ContainerID, err)
+ return imgID, nil, "", fmt.Errorf("copying layers and metadata for container %q: %w", b.ContainerID, err)
}
// If we've got more names to attach, and we know how to do that for
// the transport that we're writing the new image to, add them now.
@@ -357,10 +357,10 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
case is.Transport.Name():
img, err := is.Transport.GetStoreImage(b.store, dest)
if err != nil {
- return imgID, nil, "", fmt.Errorf("error locating just-written image %q: %w", transports.ImageName(dest), err)
+ return imgID, nil, "", fmt.Errorf("locating just-written image %q: %w", transports.ImageName(dest), err)
}
if err = util.AddImageNames(b.store, "", systemContext, img, options.AdditionalTags); err != nil {
- return imgID, nil, "", fmt.Errorf("error setting image names to %v: %w", append(img.Names, options.AdditionalTags...), err)
+ return imgID, nil, "", fmt.Errorf("setting image names to %v: %w", append(img.Names, options.AdditionalTags...), err)
}
logrus.Debugf("assigned names %v to image %q", img.Names, img.ID)
default:
@@ -370,7 +370,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
img, err := is.Transport.GetStoreImage(b.store, dest)
if err != nil && !errors.Is(err, storage.ErrImageUnknown) {
- return imgID, nil, "", fmt.Errorf("error locating image %q in local storage: %w", transports.ImageName(dest), err)
+ return imgID, nil, "", fmt.Errorf("locating image %q in local storage: %w", transports.ImageName(dest), err)
}
if err == nil {
imgID = img.ID
@@ -387,7 +387,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
logrus.Debugf("removing %v from assigned names to image %q", nameToRemove, img.ID)
dest2, err := is.Transport.ParseStoreReference(b.store, "@"+imgID)
if err != nil {
- return imgID, nil, "", fmt.Errorf("error creating unnamed destination reference for image: %w", err)
+ return imgID, nil, "", fmt.Errorf("creating unnamed destination reference for image: %w", err)
}
dest = dest2
}
@@ -400,7 +400,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
manifestDigest, err := manifest.Digest(manifestBytes)
if err != nil {
- return imgID, nil, "", fmt.Errorf("error computing digest of manifest of new image %q: %w", transports.ImageName(dest), err)
+ return imgID, nil, "", fmt.Errorf("computing digest of manifest of new image %q: %w", transports.ImageName(dest), err)
}
var ref reference.Canonical
diff --git a/vendor/github.com/containers/buildah/config.go b/vendor/github.com/containers/buildah/config.go
index aa6290186..a9883a595 100644
--- a/vendor/github.com/containers/buildah/config.go
+++ b/vendor/github.com/containers/buildah/config.go
@@ -28,7 +28,7 @@ import (
func unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.Image, wantedManifestMIMEType string) error {
_, actualManifestMIMEType, err := img.Manifest(ctx)
if err != nil {
- return fmt.Errorf("error getting manifest MIME type for %q: %w", transports.ImageName(img.Reference()), err)
+ return fmt.Errorf("getting manifest MIME type for %q: %w", transports.ImageName(img.Reference()), err)
}
if wantedManifestMIMEType != actualManifestMIMEType {
layerInfos := img.LayerInfos()
@@ -46,16 +46,16 @@ func unmarshalConvertedConfig(ctx context.Context, dest interface{}, img types.I
ManifestMIMEType: wantedManifestMIMEType,
})
if err != nil {
- return fmt.Errorf("error converting image %q from %q to %q: %w", transports.ImageName(img.Reference()), actualManifestMIMEType, wantedManifestMIMEType, err)
+ return fmt.Errorf("converting image %q from %q to %q: %w", transports.ImageName(img.Reference()), actualManifestMIMEType, wantedManifestMIMEType, err)
}
img = secondUpdatedImg
}
config, err := img.ConfigBlob(ctx)
if err != nil {
- return fmt.Errorf("error reading %s config from %q: %w", wantedManifestMIMEType, transports.ImageName(img.Reference()), err)
+ return fmt.Errorf("reading %s config from %q: %w", wantedManifestMIMEType, transports.ImageName(img.Reference()), err)
}
if err := json.Unmarshal(config, dest); err != nil {
- return fmt.Errorf("error parsing %s configuration %q from %q: %w", wantedManifestMIMEType, string(config), transports.ImageName(img.Reference()), err)
+ return fmt.Errorf("parsing %s configuration %q from %q: %w", wantedManifestMIMEType, string(config), transports.ImageName(img.Reference()), err)
}
return nil
}
@@ -64,11 +64,11 @@ func (b *Builder) initConfig(ctx context.Context, img types.Image, sys *types.Sy
if img != nil { // A pre-existing image, as opposed to a "FROM scratch" new one.
rawManifest, manifestMIMEType, err := img.Manifest(ctx)
if err != nil {
- return fmt.Errorf("error reading image manifest for %q: %w", transports.ImageName(img.Reference()), err)
+ return fmt.Errorf("reading image manifest for %q: %w", transports.ImageName(img.Reference()), err)
}
rawConfig, err := img.ConfigBlob(ctx)
if err != nil {
- return fmt.Errorf("error reading image configuration for %q: %w", transports.ImageName(img.Reference()), err)
+ return fmt.Errorf("reading image configuration for %q: %w", transports.ImageName(img.Reference()), err)
}
b.Manifest = rawManifest
b.Config = rawConfig
@@ -89,7 +89,7 @@ func (b *Builder) initConfig(ctx context.Context, img types.Image, sys *types.Sy
// Attempt to recover format-specific data from the manifest.
v1Manifest := ociv1.Manifest{}
if err := json.Unmarshal(b.Manifest, &v1Manifest); err != nil {
- return fmt.Errorf("error parsing OCI manifest %q: %w", string(b.Manifest), err)
+ return fmt.Errorf("parsing OCI manifest %q: %w", string(b.Manifest), err)
}
for k, v := range v1Manifest.Annotations {
// NOTE: do not override annotations that are
diff --git a/vendor/github.com/containers/buildah/copier/copier.go b/vendor/github.com/containers/buildah/copier/copier.go
index de464ab52..fd4c6b394 100644
--- a/vendor/github.com/containers/buildah/copier/copier.go
+++ b/vendor/github.com/containers/buildah/copier/copier.go
@@ -463,7 +463,7 @@ func convertToRelSubdirectory(root, directory string) (relative string, err erro
}
rel, err := filepath.Rel(root, directory)
if err != nil {
- return "", fmt.Errorf("error computing path of %q relative to %q: %w", directory, root, err)
+ return "", fmt.Errorf("computing path of %q relative to %q: %w", directory, root, err)
}
return cleanerReldirectory(rel), nil
}
@@ -471,7 +471,7 @@ func convertToRelSubdirectory(root, directory string) (relative string, err erro
func currentVolumeRoot() (string, error) {
cwd, err := os.Getwd()
if err != nil {
- return "", fmt.Errorf("error getting current working directory: %w", err)
+ return "", fmt.Errorf("getting current working directory: %w", err)
}
return filepath.VolumeName(cwd) + string(os.PathSeparator), nil
}
@@ -479,7 +479,7 @@ func currentVolumeRoot() (string, error) {
func isVolumeRoot(candidate string) (bool, error) {
abs, err := filepath.Abs(candidate)
if err != nil {
- return false, fmt.Errorf("error converting %q to an absolute path: %w", candidate, err)
+ return false, fmt.Errorf("converting %q to an absolute path: %w", candidate, err)
}
return abs == filepath.VolumeName(abs)+string(os.PathSeparator), nil
}
@@ -493,7 +493,7 @@ func copier(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response,
if req.Root == "" {
wd, err := os.Getwd()
if err != nil {
- return nil, fmt.Errorf("error getting current working directory: %w", err)
+ return nil, fmt.Errorf("getting current working directory: %w", err)
}
req.Directory = wd
} else {
@@ -503,19 +503,19 @@ func copier(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response,
if req.Root == "" {
root, err := currentVolumeRoot()
if err != nil {
- return nil, fmt.Errorf("error determining root of current volume: %w", err)
+ return nil, fmt.Errorf("determining root of current volume: %w", err)
}
req.Root = root
}
if filepath.IsAbs(req.Directory) {
_, err := convertToRelSubdirectory(req.Root, req.Directory)
if err != nil {
- return nil, fmt.Errorf("error rewriting %q to be relative to %q: %w", req.Directory, req.Root, err)
+ return nil, fmt.Errorf("rewriting %q to be relative to %q: %w", req.Directory, req.Root, err)
}
}
isAlreadyRoot, err := isVolumeRoot(req.Root)
if err != nil {
- return nil, fmt.Errorf("error checking if %q is a root directory: %w", req.Root, err)
+ return nil, fmt.Errorf("checking if %q is a root directory: %w", req.Root, err)
}
if !isAlreadyRoot && canChroot {
return copierWithSubprocess(bulkReader, bulkWriter, req)
@@ -610,7 +610,7 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques
cmd.Stderr = &errorBuffer
cmd.ExtraFiles = []*os.File{bulkReaderRead, bulkWriterWrite}
if err = cmd.Start(); err != nil {
- return nil, fmt.Errorf("error starting subprocess: %w", err)
+ return nil, fmt.Errorf("starting subprocess: %w", err)
}
cmdToWaitFor := cmd
defer func() {
@@ -632,7 +632,7 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques
bulkWriterWrite = nil
killAndReturn := func(err error, step string) (*response, error) { // nolint: unparam
if err2 := cmd.Process.Kill(); err2 != nil {
- return nil, fmt.Errorf("error killing subprocess: %v; %s: %w", err2, step, err)
+ return nil, fmt.Errorf("killing subprocess: %v; %s: %w", err2, step, err)
}
return nil, fmt.Errorf("%v: %w", step, err)
}
@@ -690,10 +690,10 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques
}
}
if readError != nil {
- return nil, fmt.Errorf("error passing bulk input to subprocess: %w", readError)
+ return nil, fmt.Errorf("passing bulk input to subprocess: %w", readError)
}
if writeError != nil {
- return nil, fmt.Errorf("error passing bulk output from subprocess: %w", writeError)
+ return nil, fmt.Errorf("passing bulk output from subprocess: %w", writeError)
}
return resp, nil
}
@@ -845,7 +845,7 @@ func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*re
excludes := req.Excludes()
pm, err := fileutils.NewPatternMatcher(excludes)
if err != nil {
- return nil, nil, fmt.Errorf("error processing excludes list %v: %w", excludes, err)
+ return nil, nil, fmt.Errorf("processing excludes list %v: %w", excludes, err)
}
var idMappings *idtools.IDMappings
@@ -915,7 +915,7 @@ func pathIsExcluded(root, path string, pm *fileutils.PatternMatcher) (string, bo
func resolvePath(root, path string, evaluateFinalComponent bool, pm *fileutils.PatternMatcher) (string, error) {
rel, err := convertToRelSubdirectory(root, path)
if err != nil {
- return "", fmt.Errorf("error making path %q relative to %q", path, root)
+ return "", fmt.Errorf("making path %q relative to %q", path, root)
}
workingPath := root
followed := 0
@@ -952,7 +952,7 @@ func resolvePath(root, path string, evaluateFinalComponent bool, pm *fileutils.P
// resolve the remaining components
rel, err := convertToRelSubdirectory(root, filepath.Join(workingPath, target))
if err != nil {
- return "", fmt.Errorf("error making path %q relative to %q", filepath.Join(workingPath, target), root)
+ return "", fmt.Errorf("making path %q relative to %q", filepath.Join(workingPath, target), root)
}
workingPath = root
components = append(strings.Split(filepath.Clean(string(os.PathSeparator)+rel), string(os.PathSeparator)), components[1:]...)
@@ -1357,7 +1357,7 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
// build the header using the name provided
hdr, err := tar.FileInfoHeader(srcfi, symlinkTarget)
if err != nil {
- return fmt.Errorf("error generating tar header for %s (%s): %w", contentPath, symlinkTarget, err)
+ return fmt.Errorf("generating tar header for %s (%s): %w", contentPath, symlinkTarget, err)
}
if name != "" {
hdr.Name = filepath.ToSlash(name)
@@ -1379,7 +1379,7 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
if !options.StripXattrs {
xattrs, err = Lgetxattrs(contentPath)
if err != nil {
- return fmt.Errorf("error getting extended attributes for %q: %w", contentPath, err)
+ return fmt.Errorf("getting extended attributes for %q: %w", contentPath, err)
}
}
hdr.Xattrs = xattrs // nolint:staticcheck
@@ -1391,12 +1391,12 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
if options.ExpandArchives && isArchivePath(contentPath) {
f, err := os.Open(contentPath)
if err != nil {
- return fmt.Errorf("error opening file for reading archive contents: %w", err)
+ return fmt.Errorf("opening file for reading archive contents: %w", err)
}
defer f.Close()
rc, _, err := compression.AutoDecompress(f)
if err != nil {
- return fmt.Errorf("error decompressing %s: %w", contentPath, err)
+ return fmt.Errorf("decompressing %s: %w", contentPath, err)
}
defer rc.Close()
tr := tar.NewReader(rc)
@@ -1406,22 +1406,22 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
hdr.Name = handleRename(options.Rename, hdr.Name)
}
if err = tw.WriteHeader(hdr); err != nil {
- return fmt.Errorf("error writing tar header from %q to pipe: %w", contentPath, err)
+ return fmt.Errorf("writing tar header from %q to pipe: %w", contentPath, err)
}
if hdr.Size != 0 {
n, err := io.Copy(tw, tr)
if err != nil {
- return fmt.Errorf("error extracting content from archive %s: %s: %w", contentPath, hdr.Name, err)
+ return fmt.Errorf("extracting content from archive %s: %s: %w", contentPath, hdr.Name, err)
}
if n != hdr.Size {
- return fmt.Errorf("error extracting contents of archive %s: incorrect length for %q", contentPath, hdr.Name)
+ return fmt.Errorf("extracting contents of archive %s: incorrect length for %q", contentPath, hdr.Name)
}
tw.Flush()
}
hdr, err = tr.Next()
}
if err != io.EOF {
- return fmt.Errorf("error extracting contents of archive %s: %w", contentPath, err)
+ return fmt.Errorf("extracting contents of archive %s: %w", contentPath, err)
}
return nil
}
@@ -1443,7 +1443,7 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
hostPair := idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
hdr.Uid, hdr.Gid, err = idMappings.ToContainer(hostPair)
if err != nil {
- return fmt.Errorf("error mapping host filesystem owners %#v to container filesystem owners: %w", hostPair, err)
+ return fmt.Errorf("mapping host filesystem owners %#v to container filesystem owners: %w", hostPair, err)
}
}
// force ownership and/or permissions, if requested
@@ -1467,29 +1467,29 @@ func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath str
// open the file first so that we don't write a header for it if we can't actually read it
f, err = os.Open(contentPath)
if err != nil {
- return fmt.Errorf("error opening file for adding its contents to archive: %w", err)
+ return fmt.Errorf("opening file for adding its contents to archive: %w", err)
}
defer f.Close()
} else if hdr.Typeflag == tar.TypeDir {
// open the directory file first to make sure we can access it.
f, err = os.Open(contentPath)
if err != nil {
- return fmt.Errorf("error opening directory for adding its contents to archive: %w", err)
+ return fmt.Errorf("opening directory for adding its contents to archive: %w", err)
}
defer f.Close()
}
// output the header
if err = tw.WriteHeader(hdr); err != nil {
- return fmt.Errorf("error writing header for %s (%s): %w", contentPath, hdr.Name, err)
+ return fmt.Errorf("writing header for %s (%s): %w", contentPath, hdr.Name, err)
}
if hdr.Typeflag == tar.TypeReg {
// output the content
n, err := io.Copy(tw, f)
if err != nil {
- return fmt.Errorf("error copying %s: %w", contentPath, err)
+ return fmt.Errorf("copying %s: %w", contentPath, err)
}
if n != hdr.Size {
- return fmt.Errorf("error copying %s: incorrect size (expected %d bytes, read %d bytes)", contentPath, n, hdr.Size)
+ return fmt.Errorf("copying %s: incorrect size (expected %d bytes, read %d bytes)", contentPath, hdr.Size, n)
}
tw.Flush()
}
@@ -1671,7 +1671,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
containerPair := idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
hostPair, err := idMappings.ToHost(containerPair)
if err != nil {
- return fmt.Errorf("error mapping container filesystem owner 0,0 to host filesystem owners: %w", err)
+ return fmt.Errorf("mapping container filesystem owner 0,0 to host filesystem owners: %w", err)
}
hdr.Uid, hdr.Gid = hostPair.UID, hostPair.GID
}
@@ -1736,7 +1736,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
hdr.Linkname = handleRename(req.PutOptions.Rename, hdr.Linkname)
}
if linkTarget, err = resolvePath(targetDirectory, filepath.Join(req.Root, filepath.FromSlash(hdr.Linkname)), true, nil); err != nil {
- return fmt.Errorf("error resolving hardlink target path %q under root %q", hdr.Linkname, req.Root)
+ return fmt.Errorf("resolving hardlink target path %q under root %q", hdr.Linkname, req.Root)
}
if err = os.Link(linkTarget, path); err != nil && errors.Is(err, os.ErrExist) {
if req.PutOptions.NoOverwriteDirNonDir {
@@ -1869,7 +1869,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
mode |= syscall.S_ISVTX
}
if err = syscall.Chmod(path, uint32(mode)); err != nil {
- return fmt.Errorf("error setting additional permissions on %q to 0%o: %w", path, mode, err)
+ return fmt.Errorf("setting additional permissions on %q to 0%o: %w", path, mode, err)
}
}
// set xattrs, including some that might have been reset by chown()
@@ -1885,13 +1885,13 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM
hdr.AccessTime = hdr.ModTime
}
if err = lutimes(hdr.Typeflag == tar.TypeSymlink, path, hdr.AccessTime, hdr.ModTime); err != nil {
- return fmt.Errorf("error setting access and modify timestamps on %q to %s and %s: %w", path, hdr.AccessTime, hdr.ModTime, err)
+ return fmt.Errorf("setting access and modify timestamps on %q to %s and %s: %w", path, hdr.AccessTime, hdr.ModTime, err)
}
nextHeader:
hdr, err = tr.Next()
}
if err != io.EOF {
- return fmt.Errorf("error reading tar stream: expected EOF: %w", err)
+ return fmt.Errorf("reading tar stream: expected EOF: %w", err)
}
return nil
}
diff --git a/vendor/github.com/containers/buildah/copier/syscall_unix.go b/vendor/github.com/containers/buildah/copier/syscall_unix.go
index 0f2de9354..99b2ee7b6 100644
--- a/vendor/github.com/containers/buildah/copier/syscall_unix.go
+++ b/vendor/github.com/containers/buildah/copier/syscall_unix.go
@@ -17,13 +17,13 @@ var canChroot = os.Getuid() == 0
func chroot(root string) (bool, error) {
if canChroot {
if err := os.Chdir(root); err != nil {
- return false, fmt.Errorf("error changing to intended-new-root directory %q: %w", root, err)
+ return false, fmt.Errorf("changing to intended-new-root directory %q: %w", root, err)
}
if err := unix.Chroot(root); err != nil {
- return false, fmt.Errorf("error chrooting to directory %q: %w", root, err)
+ return false, fmt.Errorf("chrooting to directory %q: %w", root, err)
}
if err := os.Chdir(string(os.PathSeparator)); err != nil {
- return false, fmt.Errorf("error changing to just-became-root directory %q: %w", root, err)
+ return false, fmt.Errorf("changing to just-became-root directory %q: %w", root, err)
}
return true, nil
}
diff --git a/vendor/github.com/containers/buildah/copier/xattrs.go b/vendor/github.com/containers/buildah/copier/xattrs.go
index bad057051..f5b2e731f 100644
--- a/vendor/github.com/containers/buildah/copier/xattrs.go
+++ b/vendor/github.com/containers/buildah/copier/xattrs.go
@@ -54,7 +54,7 @@ func Lgetxattrs(path string) (map[string]string, error) {
list = list[:0]
break
}
- return nil, fmt.Errorf("error listing extended attributes of %q: %w", path, err)
+ return nil, fmt.Errorf("listing extended attributes of %q: %w", path, err)
}
list = list[:size]
break
@@ -75,7 +75,7 @@ func Lgetxattrs(path string) (map[string]string, error) {
attributeSize *= 2
continue
}
- return nil, fmt.Errorf("error getting value of extended attribute %q on %q: %w", attribute, path, err)
+ return nil, fmt.Errorf("getting value of extended attribute %q on %q: %w", attribute, path, err)
}
m[attribute] = string(attributeValue[:size])
break
@@ -93,7 +93,7 @@ func Lsetxattrs(path string, xattrs map[string]string) error {
for attribute, value := range xattrs {
if isRelevantXattr(attribute) {
if err := unix.Lsetxattr(path, attribute, []byte(value), 0); err != nil {
- return fmt.Errorf("error setting value of extended attribute %q on %q: %w", attribute, path, err)
+ return fmt.Errorf("setting value of extended attribute %q on %q: %w", attribute, path, err)
}
}
}
diff --git a/vendor/github.com/containers/buildah/define/build.go b/vendor/github.com/containers/buildah/define/build.go
index 352280433..69b2dc9b0 100644
--- a/vendor/github.com/containers/buildah/define/build.go
+++ b/vendor/github.com/containers/buildah/define/build.go
@@ -186,6 +186,10 @@ type BuildOptions struct {
// specified, indicating that the shared, system-wide default policy
// should be used.
SignaturePolicyPath string
+ // SkipUnusedStages allows users to skip stages in multi-stage builds
+ // which do not contribute anything to the target stage. The expected
+ // default value is true.
+ SkipUnusedStages types.OptionalBool
// ReportWriter is an io.Writer which will be used to report the
// progress of the (possible) pulling of the source image and the
// writing of the new image.
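
A minimal sketch, not part of this change, of how a caller might set the new field; it assumes the containers/image/v5 OptionalBool constants and that BuildOptions is populated as elsewhere in Buildah:

package main

import (
	"fmt"

	"github.com/containers/buildah/define"
	"github.com/containers/image/v5/types"
)

func main() {
	// Hypothetical caller that opts out of the new default and builds
	// every stage, even ones the target stage never references.
	options := define.BuildOptions{
		ContextDirectory: ".",
		SkipUnusedStages: types.OptionalBoolFalse,
	}
	fmt.Println(options.SkipUnusedStages == types.OptionalBoolFalse) // true
}
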
diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go
index 015e1e18d..fb4735baa 100644
--- a/vendor/github.com/containers/buildah/define/types.go
+++ b/vendor/github.com/containers/buildah/define/types.go
@@ -123,11 +123,11 @@ func TempDirForURL(dir, prefix, url string) (name string, subdir string, err err
}
name, err = ioutil.TempDir(dir, prefix)
if err != nil {
- return "", "", fmt.Errorf("error creating temporary directory for %q: %w", url, err)
+ return "", "", fmt.Errorf("creating temporary directory for %q: %w", url, err)
}
urlParsed, err := urlpkg.Parse(url)
if err != nil {
- return "", "", fmt.Errorf("error parsing url %q: %w", url, err)
+ return "", "", fmt.Errorf("parsing url %q: %w", url, err)
}
if strings.HasPrefix(url, "git://") || strings.HasSuffix(urlParsed.Path, ".git") {
combinedOutput, gitSubDir, err := cloneToDirectory(url, name)
diff --git a/vendor/github.com/containers/buildah/delete.go b/vendor/github.com/containers/buildah/delete.go
index 718316844..7adb7c5ee 100644
--- a/vendor/github.com/containers/buildah/delete.go
+++ b/vendor/github.com/containers/buildah/delete.go
@@ -6,7 +6,7 @@ import "fmt"
// be used after this method is called.
func (b *Builder) Delete() error {
if err := b.store.DeleteContainer(b.ContainerID); err != nil {
- return fmt.Errorf("error deleting build container %q: %w", b.ContainerID, err)
+ return fmt.Errorf("deleting build container %q: %w", b.ContainerID, err)
}
b.MountPoint = ""
b.Container = ""
diff --git a/vendor/github.com/containers/buildah/digester.go b/vendor/github.com/containers/buildah/digester.go
index 9455e3680..0ed8fa41f 100644
--- a/vendor/github.com/containers/buildah/digester.go
+++ b/vendor/github.com/containers/buildah/digester.go
@@ -75,7 +75,7 @@ func (t *tarFilterer) Close() error {
err := t.pipeWriter.Close()
t.wg.Wait()
if err != nil {
- return fmt.Errorf("error closing filter pipe: %w", err)
+ return fmt.Errorf("closing filter pipe: %w", err)
}
return t.err
}
@@ -110,7 +110,7 @@ func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (sk
if !skip {
err = tarWriter.WriteHeader(hdr)
if err != nil {
- err = fmt.Errorf("error filtering tar header for %q: %w", hdr.Name, err)
+ err = fmt.Errorf("filtering tar header for %q: %w", hdr.Name, err)
break
}
if hdr.Size != 0 {
@@ -122,11 +122,11 @@ func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (sk
n, copyErr = io.Copy(tarWriter, tarReader)
}
if copyErr != nil {
- err = fmt.Errorf("error copying content for %q: %w", hdr.Name, copyErr)
+ err = fmt.Errorf("copying content for %q: %w", hdr.Name, copyErr)
break
}
if n != hdr.Size {
- err = fmt.Errorf("error filtering content for %q: expected %d bytes, got %d bytes", hdr.Name, hdr.Size, n)
+ err = fmt.Errorf("filtering content for %q: expected %d bytes, got %d bytes", hdr.Name, hdr.Size, n)
break
}
}
@@ -134,7 +134,7 @@ func newTarFilterer(writeCloser io.WriteCloser, filter func(hdr *tar.Header) (sk
hdr, err = tarReader.Next()
}
if err != io.EOF {
- filterer.err = fmt.Errorf("error reading tar archive: %w", err)
+ filterer.err = fmt.Errorf("reading tar archive: %w", err)
break
}
filterer.closedLock.Lock()
diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go
index 335a6733c..cc56ff2da 100644
--- a/vendor/github.com/containers/buildah/image.go
+++ b/vendor/github.com/containers/buildah/image.go
@@ -167,7 +167,7 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo
var uidMap, gidMap []idtools.IDMap
mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
if err != nil {
- return nil, nil, fmt.Errorf("error mounting container %q: %w", i.containerID, err)
+ return nil, nil, fmt.Errorf("mounting container %q: %w", i.containerID, err)
}
pipeReader, pipeWriter := io.Pipe()
errChan := make(chan error, 1)
@@ -190,11 +190,11 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo
}()
return ioutils.NewReadCloserWrapper(pipeReader, func() error {
if err = pipeReader.Close(); err != nil {
- err = fmt.Errorf("error closing tar archive of container %q: %w", i.containerID, err)
+ err = fmt.Errorf("closing tar archive of container %q: %w", i.containerID, err)
}
if _, err2 := i.store.Unmount(i.containerID, false); err == nil {
if err2 != nil {
- err2 = fmt.Errorf("error unmounting container %q: %w", i.containerID, err2)
+ err2 = fmt.Errorf("unmounting container %q: %w", i.containerID, err2)
}
err = err2
}
@@ -311,7 +311,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// Make a temporary directory to hold blobs.
path, err := ioutil.TempDir(os.TempDir(), define.Package)
if err != nil {
- return nil, fmt.Errorf("error creating temporary directory to hold layer blobs: %w", err)
+ return nil, fmt.Errorf("creating temporary directory to hold layer blobs: %w", err)
}
logrus.Debugf("using %q to hold temporary data", path)
defer func() {
@@ -400,7 +400,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// Extract this layer, one of possibly many.
rc, err = i.store.Diff("", layerID, diffOptions)
if err != nil {
- return nil, fmt.Errorf("error extracting %s: %w", what, err)
+ return nil, fmt.Errorf("extracting %s: %w", what, err)
}
}
srcHasher := digest.Canonical.Digester()
@@ -408,7 +408,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
rc.Close()
- return nil, fmt.Errorf("error opening file for %s: %w", what, err)
+ return nil, fmt.Errorf("opening file for %s: %w", what, err)
}
counter := ioutils.NewWriteCounter(layerFile)
@@ -427,7 +427,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
if err != nil {
layerFile.Close()
rc.Close()
- return nil, fmt.Errorf("error compressing %s: %w", what, err)
+ return nil, fmt.Errorf("compressing %s: %w", what, err)
}
writer := io.MultiWriter(writeCloser, srcHasher.Hash())
// Scrub any local user names that might correspond to UIDs or GIDs of
@@ -478,11 +478,11 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
}
if err != nil {
- return nil, fmt.Errorf("error storing %s to file: %w", what, err)
+ return nil, fmt.Errorf("storing %s to file: %w", what, err)
}
if i.compression == archive.Uncompressed {
if size != counter.Count {
- return nil, fmt.Errorf("error storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count)
+ return nil, fmt.Errorf("storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count)
}
} else {
size = counter.Count
@@ -491,7 +491,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// Rename the layer so that we can more easily find it by digest later.
finalBlobName := filepath.Join(path, destHasher.Digest().String())
if err = os.Rename(filepath.Join(path, "layer"), finalBlobName); err != nil {
- return nil, fmt.Errorf("error storing %s to file while renaming %q to %q: %w", what, filepath.Join(path, "layer"), finalBlobName, err)
+ return nil, fmt.Errorf("storing %s to file while renaming %q to %q: %w", what, filepath.Join(path, "layer"), finalBlobName, err)
}
// Add a note in the manifest about the layer. The blobs are identified by their possibly-
// compressed blob digests.
@@ -596,7 +596,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// Encode the image configuration blob.
oconfig, err := json.Marshal(&oimage)
if err != nil {
- return nil, fmt.Errorf("error encoding %#v as json: %w", oimage, err)
+ return nil, fmt.Errorf("encoding %#v as json: %w", oimage, err)
}
logrus.Debugf("OCIv1 config = %s", oconfig)
@@ -608,14 +608,14 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// Encode the manifest.
omanifestbytes, err := json.Marshal(&omanifest)
if err != nil {
- return nil, fmt.Errorf("error encoding %#v as json: %w", omanifest, err)
+ return nil, fmt.Errorf("encoding %#v as json: %w", omanifest, err)
}
logrus.Debugf("OCIv1 manifest = %s", omanifestbytes)
// Encode the image configuration blob.
dconfig, err := json.Marshal(&dimage)
if err != nil {
- return nil, fmt.Errorf("error encoding %#v as json: %w", dimage, err)
+ return nil, fmt.Errorf("encoding %#v as json: %w", dimage, err)
}
logrus.Debugf("Docker v2s2 config = %s", dconfig)
@@ -627,7 +627,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System
// Encode the manifest.
dmanifestbytes, err := json.Marshal(&dmanifest)
if err != nil {
- return nil, fmt.Errorf("error encoding %#v as json: %w", dmanifest, err)
+ return nil, fmt.Errorf("encoding %#v as json: %w", dmanifest, err)
}
logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes)
@@ -698,7 +698,7 @@ func (i *containerImageRef) Transport() types.ImageTransport {
func (i *containerImageSource) Close() error {
err := os.RemoveAll(i.path)
if err != nil {
- return fmt.Errorf("error removing layer blob directory: %w", err)
+ return fmt.Errorf("removing layer blob directory: %w", err)
}
return nil
}
@@ -764,13 +764,13 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo,
}
if err != nil || layerReadCloser == nil || size == -1 {
logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err)
- return nil, -1, fmt.Errorf("error opening layer blob: %w", err)
+ return nil, -1, fmt.Errorf("opening layer blob: %w", err)
}
logrus.Debugf("reading layer %q", blob.Digest.String())
closer := func() error {
logrus.Debugf("finished reading layer %q", blob.Digest.String())
if err := layerReadCloser.Close(); err != nil {
- return fmt.Errorf("error closing layer %q after reading: %w", blob.Digest.String(), err)
+ return fmt.Errorf("closing layer %q after reading: %w", blob.Digest.String(), err)
}
return nil
}
@@ -781,7 +781,7 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
var name reference.Named
container, err := b.store.Container(b.ContainerID)
if err != nil {
- return nil, fmt.Errorf("error locating container %q: %w", b.ContainerID, err)
+ return nil, fmt.Errorf("locating container %q: %w", b.ContainerID, err)
}
if len(container.Names) > 0 {
if parsed, err2 := reference.ParseNamed(container.Names[0]); err2 == nil {
@@ -798,11 +798,11 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
}
oconfig, err := json.Marshal(&b.OCIv1)
if err != nil {
- return nil, fmt.Errorf("error encoding OCI-format image configuration %#v: %w", b.OCIv1, err)
+ return nil, fmt.Errorf("encoding OCI-format image configuration %#v: %w", b.OCIv1, err)
}
dconfig, err := json.Marshal(&b.Docker)
if err != nil {
- return nil, fmt.Errorf("error encoding docker-format image configuration %#v: %w", b.Docker, err)
+ return nil, fmt.Errorf("encoding docker-format image configuration %#v: %w", b.Docker, err)
}
var created *time.Time
if options.HistoryTimestamp != nil {
@@ -858,7 +858,7 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR
func (b *Builder) ExtractRootfs(options CommitOptions, opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) {
src, err := b.makeContainerImageRef(options)
if err != nil {
- return nil, nil, fmt.Errorf("error creating image reference for container %q to extract its contents: %w", b.ContainerID, err)
+ return nil, nil, fmt.Errorf("creating image reference for container %q to extract its contents: %w", b.ContainerID, err)
}
return src.extractRootfs(opts)
}
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index a1810d6ad..293e5bc96 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -68,7 +68,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
}
if len(paths) == 0 {
- return "", nil, errors.New("error building: no dockerfiles specified")
+ return "", nil, errors.New("building: no dockerfiles specified")
}
if len(options.Platforms) > 1 && options.IIDFile != "" {
return "", nil, fmt.Errorf("building multiple images, but iidfile %q can only be used to store one image ID", options.IIDFile)
@@ -138,7 +138,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
dinfo, err = contents.Stat()
if err != nil {
contents.Close()
- return "", nil, fmt.Errorf("error reading info about %q: %w", dfile, err)
+ return "", nil, fmt.Errorf("reading info about %q: %w", dfile, err)
}
if dinfo.Mode().IsRegular() && dinfo.Size() == 0 {
contents.Close()
@@ -171,7 +171,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
if options.JobSemaphore == nil {
if options.Jobs != nil {
if *options.Jobs < 0 {
- return "", nil, errors.New("error building: invalid value for jobs. It must be a positive integer")
+ return "", nil, errors.New("building: invalid value for jobs. It must be a positive integer")
}
if *options.Jobs > 0 {
options.JobSemaphore = semaphore.NewWeighted(int64(*options.Jobs))
@@ -371,10 +371,10 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B
return id, ref, nil
}
-func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logrus.Logger, logPrefix string, options define.BuildOptions, dockerfiles []string, dockerfilecontents [][]byte) (string, reference.Canonical, error) {
+func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logrus.Logger, logPrefix string, options define.BuildOptions, containerFiles []string, dockerfilecontents [][]byte) (string, reference.Canonical, error) {
mainNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfilecontents[0]))
if err != nil {
- return "", nil, fmt.Errorf("error parsing main Dockerfile: %s: %w", dockerfiles[0], err)
+ return "", nil, fmt.Errorf("parsing main Dockerfile: %s: %w", containerFiles[0], err)
}
warnOnUnsetBuildArgs(logger, mainNode, options.Args)
@@ -416,8 +416,8 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr
for i, d := range dockerfilecontents[1:] {
additionalNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(d))
if err != nil {
- dockerfiles := dockerfiles[1:]
- return "", nil, fmt.Errorf("error parsing additional Dockerfile %s: %w", dockerfiles[i], err)
+ containerFiles := containerFiles[1:]
+ return "", nil, fmt.Errorf("parsing additional Dockerfile %s: %w", containerFiles[i], err)
}
mainNode.Children = append(mainNode.Children, additionalNode.Children...)
}
@@ -443,16 +443,16 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr
labelLine = fmt.Sprintf("LABEL %q=%q\n", key, value)
additionalNode, err := imagebuilder.ParseDockerfile(strings.NewReader(labelLine))
if err != nil {
- return "", nil, fmt.Errorf("error while adding additional LABEL steps: %w", err)
+ return "", nil, fmt.Errorf("while adding additional LABEL steps: %w", err)
}
mainNode.Children = append(mainNode.Children, additionalNode.Children...)
}
}
}
- exec, err := newExecutor(logger, logPrefix, store, options, mainNode)
+ exec, err := newExecutor(logger, logPrefix, store, options, mainNode, containerFiles)
if err != nil {
- return "", nil, fmt.Errorf("error creating build executor: %w", err)
+ return "", nil, fmt.Errorf("creating build executor: %w", err)
}
b := imagebuilder.NewBuilder(options.Args)
defaultContainerConfig, err := config.Default()
@@ -462,7 +462,7 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr
b.Env = append(defaultContainerConfig.GetDefaultEnv(), b.Env...)
stages, err := imagebuilder.NewStages(mainNode, b)
if err != nil {
- return "", nil, fmt.Errorf("error reading multiple stages: %w", err)
+ return "", nil, fmt.Errorf("reading multiple stages: %w", err)
}
if options.Target != "" {
stagesTargeted, ok := stages.ThroughTarget(options.Target)
@@ -506,7 +506,7 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string
cppPath, err := exec.LookPath(cppCommand)
if err != nil {
if errors.Is(err, exec.ErrNotFound) {
- err = fmt.Errorf("error: %v: .in support requires %s to be installed", err, cppCommand)
+ err = fmt.Errorf("%v: .in support requires %s to be installed", err, cppCommand)
}
return nil, err
}
@@ -518,7 +518,7 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string
if flags, ok := os.LookupEnv("BUILDAH_CPPFLAGS"); ok {
args, err := shellwords.Parse(flags)
if err != nil {
- return nil, fmt.Errorf("error parsing BUILDAH_CPPFLAGS %q: %v", flags, err)
+ return nil, fmt.Errorf("parsing BUILDAH_CPPFLAGS %q: %v", flags, err)
}
cppArgs = append(cppArgs, args...)
}
@@ -536,7 +536,7 @@ func preprocessContainerfileContents(logger *logrus.Logger, containerfile string
logger.Warnf("Ignoring %s\n", stderrBuffer.String())
}
if stdoutBuffer.Len() == 0 {
- return nil, fmt.Errorf("error preprocessing %s: preprocessor produced no output: %w", containerfile, err)
+ return nil, fmt.Errorf("preprocessing %s: preprocessor produced no output: %w", containerfile, err)
}
}
return &stdoutBuffer, nil
@@ -677,14 +677,14 @@ func platformsForBaseImages(ctx context.Context, logger *logrus.Logger, dockerfi
func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from string, args map[string]string, additionalBuildContext map[string]*define.AdditionalBuildContext) ([]string, error) {
mainNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfilecontents[0]))
if err != nil {
- return nil, fmt.Errorf("error parsing main Dockerfile: %s: %w", dockerfilenames[0], err)
+ return nil, fmt.Errorf("parsing main Dockerfile: %s: %w", dockerfilenames[0], err)
}
for i, d := range dockerfilecontents[1:] {
additionalNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(d))
if err != nil {
dockerfilenames := dockerfilenames[1:]
- return nil, fmt.Errorf("error parsing additional Dockerfile %s: %w", dockerfilenames[i], err)
+ return nil, fmt.Errorf("parsing additional Dockerfile %s: %w", dockerfilenames[i], err)
}
mainNode.Children = append(mainNode.Children, additionalNode.Children...)
}
@@ -697,7 +697,7 @@ func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from stri
b.Env = defaultContainerConfig.GetDefaultEnv()
stages, err := imagebuilder.NewStages(mainNode, b)
if err != nil {
- return nil, fmt.Errorf("error reading multiple stages: %w", err)
+ return nil, fmt.Errorf("reading multiple stages: %w", err)
}
var baseImages []string
nicknames := make(map[string]bool)
diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go
index ddd2dfc48..d30b1356e 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go
@@ -82,6 +82,7 @@ type Executor struct {
out io.Writer
err io.Writer
signaturePolicyPath string
+ skipUnusedStages types.OptionalBool
systemContext *types.SystemContext
reportWriter io.Writer
isolation define.Isolation
@@ -151,7 +152,7 @@ type imageTypeAndHistoryAndDiffIDs struct {
}
// newExecutor creates a new instance of the imagebuilder.Executor interface.
-func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, options define.BuildOptions, mainNode *parser.Node) (*Executor, error) {
+func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, options define.BuildOptions, mainNode *parser.Node, containerFiles []string) (*Executor, error) {
defaultContainerConfig, err := config.Default()
if err != nil {
return nil, fmt.Errorf("failed to get container config: %w", err)
@@ -159,7 +160,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
excludes := options.Excludes
if len(excludes) == 0 {
- excludes, options.IgnoreFile, err = parse.ContainerIgnoreFile(options.ContextDirectory, options.IgnoreFile)
+ excludes, options.IgnoreFile, err = parse.ContainerIgnoreFile(options.ContextDirectory, options.IgnoreFile, containerFiles)
if err != nil {
return nil, err
}
@@ -237,6 +238,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o
outputFormat: options.OutputFormat,
additionalTags: options.AdditionalTags,
signaturePolicyPath: options.SignaturePolicyPath,
+ skipUnusedStages: options.SkipUnusedStages,
systemContext: options.SystemContext,
log: options.Log,
in: options.In,
@@ -402,7 +404,7 @@ func (b *Executor) waitForStage(ctx context.Context, name string, stages imagebu
b.stagesSemaphore.Release(1)
time.Sleep(time.Millisecond * 10)
if err := b.stagesSemaphore.Acquire(ctx, 1); err != nil {
- return true, fmt.Errorf("error reacquiring job semaphore: %w", err)
+ return true, fmt.Errorf("reacquiring job semaphore: %w", err)
}
}
}
@@ -417,20 +419,20 @@ func (b *Executor) getImageTypeAndHistoryAndDiffIDs(ctx context.Context, imageID
}
imageRef, err := is.Transport.ParseStoreReference(b.store, "@"+imageID)
if err != nil {
- return "", nil, nil, fmt.Errorf("error getting image reference %q: %w", imageID, err)
+ return "", nil, nil, fmt.Errorf("getting image reference %q: %w", imageID, err)
}
ref, err := imageRef.NewImage(ctx, nil)
if err != nil {
- return "", nil, nil, fmt.Errorf("error creating new image from reference to image %q: %w", imageID, err)
+ return "", nil, nil, fmt.Errorf("creating new image from reference to image %q: %w", imageID, err)
}
defer ref.Close()
oci, err := ref.OCIConfig(ctx)
if err != nil {
- return "", nil, nil, fmt.Errorf("error getting possibly-converted OCI config of image %q: %w", imageID, err)
+ return "", nil, nil, fmt.Errorf("getting possibly-converted OCI config of image %q: %w", imageID, err)
}
manifestBytes, manifestFormat, err := ref.Manifest(ctx)
if err != nil {
- return "", nil, nil, fmt.Errorf("error getting manifest of image %q: %w", imageID, err)
+ return "", nil, nil, fmt.Errorf("getting manifest of image %q: %w", imageID, err)
}
if manifestFormat == "" && len(manifestBytes) > 0 {
manifestFormat = manifest.GuessMIMEType(manifestBytes)
@@ -539,7 +541,7 @@ func markDependencyStagesForTarget(dependencyMap map[string]*stageDependencyInfo
// over each of the one or more parsed Dockerfiles and stages.
func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (imageID string, ref reference.Canonical, err error) {
if len(stages) == 0 {
- return "", nil, errors.New("error building: no stages to build")
+ return "", nil, errors.New("building: no stages to build")
}
var cleanupImages []string
cleanupStages := make(map[int]*StageExecutor)
@@ -792,9 +794,10 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
return
}
// Skip stage if it is not needed by TargetStage
- // or any of its dependency stages.
+ // or any of its dependency stages and `SkipUnusedStages`
+ // is not set to `false`.
if stageDependencyInfo, ok := dependencyMap[stages[index].Name]; ok {
- if !stageDependencyInfo.NeededByTarget {
+ if !stageDependencyInfo.NeededByTarget && b.skipUnusedStages != types.OptionalBoolFalse {
logrus.Debugf("Skipping stage with Name %q and index %d since its not needed by the target stage", stages[index].Name, index)
ch <- Result{
Index: index,
@@ -873,18 +876,18 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image
case is.Transport.Name():
img, err := is.Transport.GetStoreImage(b.store, dest)
if err != nil {
- return imageID, ref, fmt.Errorf("error locating just-written image %q: %w", transports.ImageName(dest), err)
+ return imageID, ref, fmt.Errorf("locating just-written image %q: %w", transports.ImageName(dest), err)
}
if len(b.additionalTags) > 0 {
if err = util.AddImageNames(b.store, "", b.systemContext, img, b.additionalTags); err != nil {
- return imageID, ref, fmt.Errorf("error setting image names to %v: %w", append(img.Names, b.additionalTags...), err)
+ return imageID, ref, fmt.Errorf("setting image names to %v: %w", append(img.Names, b.additionalTags...), err)
}
logrus.Debugf("assigned names %v to image %q", img.Names, img.ID)
}
// Report back the caller the tags applied, if any.
img, err = is.Transport.GetStoreImage(b.store, dest)
if err != nil {
- return imageID, ref, fmt.Errorf("error locating just-written image %q: %w", transports.ImageName(dest), err)
+ return imageID, ref, fmt.Errorf("locating just-written image %q: %w", transports.ImageName(dest), err)
}
for _, name := range img.Names {
fmt.Fprintf(b.out, "Successfully tagged %s\n", name)
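The executor.go hunks above thread a three-valued skipUnusedStages setting into the stage-skipping check, so leaving it unset keeps the skip-by-default behaviour while an explicit false disables skipping. A small sketch of that gating, with a local OptionalBool standing in for types.OptionalBool from containers/image:

```go
package main

import "fmt"

// OptionalBool mirrors the three-valued type used in the diff
// (types.OptionalBool from containers/image): unset, true, or false.
type OptionalBool int

const (
	OptionalBoolUndefined OptionalBool = iota
	OptionalBoolTrue
	OptionalBoolFalse
)

// shouldSkip reproduces the gating logic from Executor.Build: a stage that
// the target does not need is skipped unless SkipUnusedStages was
// explicitly set to false.
func shouldSkip(neededByTarget bool, skipUnusedStages OptionalBool) bool {
	return !neededByTarget && skipUnusedStages != OptionalBoolFalse
}

func main() {
	fmt.Println(shouldSkip(false, OptionalBoolUndefined)) // true: default is to skip
	fmt.Println(shouldSkip(false, OptionalBoolFalse))     // false: user opted out
	fmt.Println(shouldSkip(true, OptionalBoolTrue))       // false: stage is needed anyway
}
```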
diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
index 9d8214fbd..de0e16bcc 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go
@@ -93,10 +93,10 @@ func (s *StageExecutor) Preserve(path string) error {
// except ensure that it exists.
createdDirPerms := os.FileMode(0755)
if err := copier.Mkdir(s.mountPoint, filepath.Join(s.mountPoint, path), copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
- return fmt.Errorf("error ensuring volume path exists: %w", err)
+ return fmt.Errorf("ensuring volume path exists: %w", err)
}
if err := s.volumeCacheInvalidate(path); err != nil {
- return fmt.Errorf("error ensuring volume path %q is preserved: %w", filepath.Join(s.mountPoint, path), err)
+ return fmt.Errorf("ensuring volume path %q is preserved: %w", filepath.Join(s.mountPoint, path), err)
}
return nil
}
@@ -123,14 +123,14 @@ func (s *StageExecutor) Preserve(path string) error {
archivedPath = evaluated
path = string(os.PathSeparator) + symLink
} else {
- return fmt.Errorf("error evaluating path %q: %w", path, err)
+ return fmt.Errorf("evaluating path %q: %w", path, err)
}
st, err := os.Stat(archivedPath)
if errors.Is(err, os.ErrNotExist) {
createdDirPerms := os.FileMode(0755)
if err = copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
- return fmt.Errorf("error ensuring volume path exists: %w", err)
+ return fmt.Errorf("ensuring volume path exists: %w", err)
}
st, err = os.Stat(archivedPath)
}
@@ -142,7 +142,7 @@ func (s *StageExecutor) Preserve(path string) error {
if !s.volumes.Add(path) {
// This path is not a subdirectory of a volume path that we're
// already preserving, so adding it to the list should work.
- return fmt.Errorf("error adding %q to the volume cache", path)
+ return fmt.Errorf("adding %q to the volume cache", path)
}
s.volumeCache[path] = cacheFile
// Now prune cache files for volumes that are now supplanted by this one.
@@ -207,14 +207,14 @@ func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) {
for cachedPath, cacheFile := range s.volumeCache {
archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{})
if err != nil {
- return nil, fmt.Errorf("error evaluating volume path: %w", err)
+ return nil, fmt.Errorf("evaluating volume path: %w", err)
}
relativePath, err := filepath.Rel(s.mountPoint, archivedPath)
if err != nil {
- return nil, fmt.Errorf("error converting %q into a path relative to %q: %w", archivedPath, s.mountPoint, err)
+ return nil, fmt.Errorf("converting %q into a path relative to %q: %w", archivedPath, s.mountPoint, err)
}
if strings.HasPrefix(relativePath, ".."+string(os.PathSeparator)) {
- return nil, fmt.Errorf("error converting %q into a path relative to %q", archivedPath, s.mountPoint)
+ return nil, fmt.Errorf("converting %q into a path relative to %q", archivedPath, s.mountPoint)
}
_, err = os.Stat(cacheFile)
if err == nil {
@@ -226,7 +226,7 @@ func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) {
}
createdDirPerms := os.FileMode(0755)
if err := copier.Mkdir(s.mountPoint, archivedPath, copier.MkdirOptions{ChmodNew: &createdDirPerms}); err != nil {
- return nil, fmt.Errorf("error ensuring volume path exists: %w", err)
+ return nil, fmt.Errorf("ensuring volume path exists: %w", err)
}
logrus.Debugf("caching contents of volume %q in %q", archivedPath, cacheFile)
cache, err := os.Create(cacheFile)
@@ -236,12 +236,12 @@ func (s *StageExecutor) volumeCacheSaveVFS() (mounts []specs.Mount, err error) {
defer cache.Close()
rc, err := chrootarchive.Tar(archivedPath, nil, s.mountPoint)
if err != nil {
- return nil, fmt.Errorf("error archiving %q: %w", archivedPath, err)
+ return nil, fmt.Errorf("archiving %q: %w", archivedPath, err)
}
defer rc.Close()
_, err = io.Copy(cache, rc)
if err != nil {
- return nil, fmt.Errorf("error archiving %q to %q: %w", archivedPath, cacheFile, err)
+ return nil, fmt.Errorf("archiving %q to %q: %w", archivedPath, cacheFile, err)
}
mount := specs.Mount{
Source: archivedPath,
@@ -259,7 +259,7 @@ func (s *StageExecutor) volumeCacheRestoreVFS() (err error) {
for cachedPath, cacheFile := range s.volumeCache {
archivedPath, err := copier.Eval(s.mountPoint, filepath.Join(s.mountPoint, cachedPath), copier.EvalOptions{})
if err != nil {
- return fmt.Errorf("error evaluating volume path: %w", err)
+ return fmt.Errorf("evaluating volume path: %w", err)
}
logrus.Debugf("restoring contents of volume %q from %q", archivedPath, cacheFile)
cache, err := os.Open(cacheFile)
@@ -276,7 +276,7 @@ func (s *StageExecutor) volumeCacheRestoreVFS() (err error) {
}
err = chrootarchive.Untar(cache, archivedPath, nil)
if err != nil {
- return fmt.Errorf("error extracting archive at %q: %w", archivedPath, err)
+ return fmt.Errorf("extracting archive at %q: %w", archivedPath, err)
}
if st, ok := s.volumeCacheInfo[cachedPath]; ok {
if err := os.Chmod(archivedPath, st.Mode()); err != nil {
@@ -488,7 +488,7 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte
if len(arr) < 2 {
return nil, fmt.Errorf("Invalid --mount command: %s", flag)
}
- tokens := strings.Split(arr[1], ",")
+ tokens := strings.Split(flag, ",")
for _, val := range tokens {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
@@ -593,7 +593,7 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
if stdin == nil {
devNull, err := os.Open(os.DevNull)
if err != nil {
- return fmt.Errorf("error opening %q for reading: %v", os.DevNull, err)
+ return fmt.Errorf("opening %q for reading: %v", os.DevNull, err)
}
defer devNull.Close()
stdin = devNull
@@ -602,6 +602,7 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
Args: s.executor.runtimeArgs,
Cmd: config.Cmd,
ContextDir: s.executor.contextDir,
+ ConfigureNetwork: s.executor.configureNetwork,
Entrypoint: config.Entrypoint,
Env: config.Env,
Hostname: config.Hostname,
@@ -624,10 +625,9 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error {
User: config.User,
WorkingDir: config.WorkingDir,
}
+
if config.NetworkDisabled {
options.ConfigureNetwork = buildah.NetworkDisabled
- } else {
- options.ConfigureNetwork = buildah.NetworkEnabled
}
args := run.Args
@@ -686,7 +686,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
base, err := ib.From(node)
if err != nil {
logrus.Debugf("prepare(node.Children=%#v)", node.Children)
- return nil, fmt.Errorf("error determining starting point for build: %w", err)
+ return nil, fmt.Errorf("determining starting point for build: %w", err)
}
from = base
}
@@ -755,7 +755,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
builder, err = buildah.NewBuilder(ctx, s.executor.store, builderOptions)
if err != nil {
- return nil, fmt.Errorf("error creating build container: %w", err)
+ return nil, fmt.Errorf("creating build container: %w", err)
}
// If executor's ProcessLabel and MountLabel is empty means this is the first stage
@@ -817,7 +817,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
if err2 := builder.Delete(); err2 != nil {
logrus.Debugf("error deleting container which we failed to update: %v", err2)
}
- return nil, fmt.Errorf("error updating build context: %w", err)
+ return nil, fmt.Errorf("updating build context: %w", err)
}
}
mountPoint, err := builder.Mount(builder.MountLabel)
@@ -825,7 +825,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo
if err2 := builder.Delete(); err2 != nil {
logrus.Debugf("error deleting container which we failed to mount: %v", err2)
}
- return nil, fmt.Errorf("error mounting new container: %w", err)
+ return nil, fmt.Errorf("mounting new container: %w", err)
}
if rebase {
// Make this our "current" working container.
@@ -1014,7 +1014,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// the case, we need to commit() to create a new image.
logCommit(s.output, -1)
if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), false, s.output, s.executor.squash); err != nil {
- return "", nil, fmt.Errorf("error committing base container: %w", err)
+ return "", nil, fmt.Errorf("committing base container: %w", err)
}
// Generate build output if needed.
if canGenerateBuildOutput {
@@ -1064,7 +1064,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// Resolve any arguments in this instruction.
step := ib.Step()
if err := step.Resolve(node); err != nil {
- return "", nil, fmt.Errorf("error resolving step %+v: %w", *node, err)
+ return "", nil, fmt.Errorf("resolving step %+v: %w", *node, err)
}
logrus.Debugf("Parsed Step: %+v", *step)
if !s.executor.quiet {
@@ -1150,7 +1150,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
err := ib.Run(step, s, noRunsRemaining)
if err != nil {
logrus.Debugf("Error building at step %+v: %v", *step, err)
- return "", nil, fmt.Errorf("error building at STEP \"%s\": %w", step.Message, err)
+ return "", nil, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err)
}
// In case we added content, retrieve its digest.
addedContentSummary := s.getContentSummaryAfterAddingContent()
@@ -1175,7 +1175,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
logCommit(s.output, i)
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output, s.executor.squash)
if err != nil {
- return "", nil, fmt.Errorf("error committing container for step %+v: %w", *step, err)
+ return "", nil, fmt.Errorf("committing container for step %+v: %w", *step, err)
}
logImageID(imgID)
// Generate build output if needed.
@@ -1236,7 +1236,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
if canMatchCacheOnlyAfterRun {
if err = ib.Run(step, s, noRunsRemaining); err != nil {
logrus.Debugf("Error building at step %+v: %v", *step, err)
- return "", nil, fmt.Errorf("error building at STEP \"%s\": %w", step.Message, err)
+ return "", nil, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err)
}
// Retrieve the digest info for the content that we just copied
// into the rootfs.
@@ -1251,7 +1251,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
}
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
if err != nil {
- return "", nil, fmt.Errorf("error checking if cached image exists from a previous build: %w", err)
+ return "", nil, fmt.Errorf("checking if cached image exists from a previous build: %w", err)
}
// All the best effort to find cache on localstorage have failed try pulling
// cache from remote repo if `--cache-from` was configured.
@@ -1263,7 +1263,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
logCachePulled(cacheKey)
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
if err != nil {
- return "", nil, fmt.Errorf("error checking if cached image exists from a previous build: %w", err)
+ return "", nil, fmt.Errorf("checking if cached image exists from a previous build: %w", err)
}
if cacheID != "" {
pulledAndUsedCacheImage = true
@@ -1282,7 +1282,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// Process the instruction directly.
if err = ib.Run(step, s, noRunsRemaining); err != nil {
logrus.Debugf("Error building at step %+v: %v", *step, err)
- return "", nil, fmt.Errorf("error building at STEP \"%s\": %w", step.Message, err)
+ return "", nil, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err)
}
// In case we added content, retrieve its digest.
@@ -1300,7 +1300,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
if checkForLayers {
cacheID, err = s.intermediateImageExists(ctx, node, addedContentSummary, s.stepRequiresLayer(step))
if err != nil {
- return "", nil, fmt.Errorf("error checking if cached image exists from a previous build: %w", err)
+ return "", nil, fmt.Errorf("checking if cached image exists from a previous build: %w", err)
}
}
} else {
@@ -1318,7 +1318,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
err := ib.Run(step, s, noRunsRemaining)
if err != nil {
logrus.Debugf("Error building at step %+v: %v", *step, err)
- return "", nil, fmt.Errorf("error building at STEP \"%s\": %w", step.Message, err)
+ return "", nil, fmt.Errorf("building at STEP \"%s\": %w", step.Message, err)
}
}
}
@@ -1351,7 +1351,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// can be part of build-cache.
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, false)
if err != nil {
- return "", nil, fmt.Errorf("error committing container for step %+v: %w", *step, err)
+ return "", nil, fmt.Errorf("committing container for step %+v: %w", *step, err)
}
// Generate build output if needed.
if canGenerateBuildOutput {
@@ -1385,7 +1385,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// is the last instruction of the last stage.
imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, true)
if err != nil {
- return "", nil, fmt.Errorf("error committing final squash step %+v: %w", *step, err)
+ return "", nil, fmt.Errorf("committing final squash step %+v: %w", *step, err)
}
// Generate build output if needed.
if canGenerateBuildOutput {
@@ -1436,7 +1436,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
// ID that we really should not be pulling anymore (see
// containers/podman/issues/10307).
if _, err := s.prepare(ctx, imgID, false, true, define.PullNever); err != nil {
- return "", nil, fmt.Errorf("error preparing container for next step: %w", err)
+ return "", nil, fmt.Errorf("preparing container for next step: %w", err)
}
}
}
@@ -1648,27 +1648,27 @@ func (s *StageExecutor) tagExistingImage(ctx context.Context, cacheID, output st
// Look up the source image, expecting it to be in local storage
src, err := is.Transport.ParseStoreReference(s.executor.store, cacheID)
if err != nil {
- return "", nil, fmt.Errorf("error getting source imageReference for %q: %w", cacheID, err)
+ return "", nil, fmt.Errorf("getting source imageReference for %q: %w", cacheID, err)
}
options := cp.Options{
RemoveSignatures: true, // more like "ignore signatures", since they don't get removed when src and dest are the same image
}
manifestBytes, err := cp.Image(ctx, policyContext, dest, src, &options)
if err != nil {
- return "", nil, fmt.Errorf("error copying image %q: %w", cacheID, err)
+ return "", nil, fmt.Errorf("copying image %q: %w", cacheID, err)
}
manifestDigest, err := manifest.Digest(manifestBytes)
if err != nil {
- return "", nil, fmt.Errorf("error computing digest of manifest for image %q: %w", cacheID, err)
+ return "", nil, fmt.Errorf("computing digest of manifest for image %q: %w", cacheID, err)
}
img, err := is.Transport.GetStoreImage(s.executor.store, dest)
if err != nil {
- return "", nil, fmt.Errorf("error locating new copy of image %q (i.e., %q): %w", cacheID, transports.ImageName(dest), err)
+ return "", nil, fmt.Errorf("locating new copy of image %q (i.e., %q): %w", cacheID, transports.ImageName(dest), err)
}
var ref reference.Canonical
if dref := dest.DockerReference(); dref != nil {
if ref, err = reference.WithDigest(dref, manifestDigest); err != nil {
- return "", nil, fmt.Errorf("error computing canonical reference for new image %q (i.e., %q): %w", cacheID, transports.ImageName(dest), err)
+ return "", nil, fmt.Errorf("computing canonical reference for new image %q (i.e., %q): %w", cacheID, transports.ImageName(dest), err)
}
}
return img.ID, ref, nil
@@ -1688,7 +1688,7 @@ func (s *StageExecutor) generateCacheKey(ctx context.Context, currNode *parser.N
if s.builder.FromImageID != "" {
manifestType, baseHistory, diffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID)
if err != nil {
- return "", fmt.Errorf("error getting history of base image %q: %w", s.builder.FromImageID, err)
+ return "", fmt.Errorf("getting history of base image %q: %w", s.builder.FromImageID, err)
}
for i := 0; i < len(diffIDs); i++ {
fmt.Fprintln(hash, diffIDs[i].String())
@@ -1788,14 +1788,14 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p
// Get the list of images available in the image store
images, err := s.executor.store.Images()
if err != nil {
- return "", fmt.Errorf("error getting image list from store: %w", err)
+ return "", fmt.Errorf("getting image list from store: %w", err)
}
var baseHistory []v1.History
var baseDiffIDs []digest.Digest
if s.builder.FromImageID != "" {
_, baseHistory, baseDiffIDs, err = s.executor.getImageTypeAndHistoryAndDiffIDs(ctx, s.builder.FromImageID)
if err != nil {
- return "", fmt.Errorf("error getting history of base image %q: %w", s.builder.FromImageID, err)
+ return "", fmt.Errorf("getting history of base image %q: %w", s.builder.FromImageID, err)
}
}
for _, image := range images {
@@ -1815,7 +1815,7 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p
if image.TopLayer != "" {
imageTopLayer, err = s.executor.store.Layer(image.TopLayer)
if err != nil {
- return "", fmt.Errorf("error getting top layer info: %w", err)
+ return "", fmt.Errorf("getting top layer info: %w", err)
}
// Figure out which layer from this image we should
// compare our container's base layer to.
@@ -2010,7 +2010,7 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer
if imageRef != nil {
if dref := imageRef.DockerReference(); dref != nil {
if ref, err = reference.WithDigest(dref, manifestDigest); err != nil {
- return "", nil, fmt.Errorf("error computing canonical reference for new image %q: %w", imgID, err)
+ return "", nil, fmt.Errorf("computing canonical reference for new image %q: %w", imgID, err)
}
}
}
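One behavioural hunk above changes runStageMountPoints to tokenize the whole --mount flag (strings.Split(flag, ",")) rather than only what follows the first field, so options such as from= are found even when type= is not the leading key. A hedged sketch of that scan (stageRefFromMountFlag is a hypothetical helper name):

```go
package main

import (
	"fmt"
	"strings"
)

// stageRefFromMountFlag scans every comma-separated field of a --mount flag
// for a from= entry, instead of assuming the first field is type= and only
// parsing what comes after it.
func stageRefFromMountFlag(flag string) (string, bool) {
	for _, field := range strings.Split(flag, ",") {
		kv := strings.SplitN(field, "=", 2)
		if kv[0] == "from" && len(kv) == 2 {
			return kv[1], true
		}
	}
	return "", false
}

func main() {
	// type= is not the leading field here, but from= is still found.
	flag := "from=builder,type=bind,source=/src,target=/dst"
	if name, ok := stageRefFromMountFlag(flag); ok {
		fmt.Println("mount references stage:", name)
	}
}
```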
diff --git a/vendor/github.com/containers/buildah/import.go b/vendor/github.com/containers/buildah/import.go
index 70dccad94..9f925a1db 100644
--- a/vendor/github.com/containers/buildah/import.go
+++ b/vendor/github.com/containers/buildah/import.go
@@ -34,14 +34,14 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
}
src, err := ref.NewImageSource(ctx, systemContext)
if err != nil {
- return nil, fmt.Errorf("error instantiating image source: %w", err)
+ return nil, fmt.Errorf("instantiating image source: %w", err)
}
defer src.Close()
imageDigest := ""
manifestBytes, manifestType, err := src.GetManifest(ctx, nil)
if err != nil {
- return nil, fmt.Errorf("error loading image manifest for %q: %w", transports.ImageName(ref), err)
+ return nil, fmt.Errorf("loading image manifest for %q: %w", transports.ImageName(ref), err)
}
if manifestDigest, err := manifest.Digest(manifestBytes); err == nil {
imageDigest = manifestDigest.String()
@@ -51,18 +51,18 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
if manifest.MIMETypeIsMultiImage(manifestType) {
list, err := manifest.ListFromBlob(manifestBytes, manifestType)
if err != nil {
- return nil, fmt.Errorf("error parsing image manifest for %q as list: %w", transports.ImageName(ref), err)
+ return nil, fmt.Errorf("parsing image manifest for %q as list: %w", transports.ImageName(ref), err)
}
instance, err := list.ChooseInstance(systemContext)
if err != nil {
- return nil, fmt.Errorf("error finding an appropriate image in manifest list %q: %w", transports.ImageName(ref), err)
+ return nil, fmt.Errorf("finding an appropriate image in manifest list %q: %w", transports.ImageName(ref), err)
}
instanceDigest = &instance
}
image, err := image.FromUnparsedImage(ctx, systemContext, image.UnparsedInstance(src, instanceDigest))
if err != nil {
- return nil, fmt.Errorf("error instantiating image for %q instance %q: %w", transports.ImageName(ref), instanceDigest, err)
+ return nil, fmt.Errorf("instantiating image for %q instance %q: %w", transports.ImageName(ref), instanceDigest, err)
}
imageName := ""
@@ -73,7 +73,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
if img.TopLayer != "" {
layer, err4 := store.Layer(img.TopLayer)
if err4 != nil {
- return nil, fmt.Errorf("error reading information about image's top layer: %w", err4)
+ return nil, fmt.Errorf("reading information about image's top layer: %w", err4)
}
uidmap, gidmap = convertStorageIDMaps(layer.UIDMap, layer.GIDMap)
}
@@ -110,7 +110,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system
}
if err := builder.initConfig(ctx, image, systemContext); err != nil {
- return nil, fmt.Errorf("error preparing image configuration: %w", err)
+ return nil, fmt.Errorf("preparing image configuration: %w", err)
}
return builder, nil
@@ -147,7 +147,7 @@ func importBuilder(ctx context.Context, store storage.Store, options ImportOptio
err = builder.Save()
if err != nil {
- return nil, fmt.Errorf("error saving builder state: %w", err)
+ return nil, fmt.Errorf("saving builder state: %w", err)
}
return builder, nil
@@ -167,7 +167,7 @@ func importBuilderFromImage(ctx context.Context, store storage.Store, options Im
builder, err := importBuilderDataFromImage(ctx, store, systemContext, img.ID, "", "")
if err != nil {
- return nil, fmt.Errorf("error importing build settings from image %q: %w", options.Image, err)
+ return nil, fmt.Errorf("importing build settings from image %q: %w", options.Image, err)
}
builder.setupLogger()
diff --git a/vendor/github.com/containers/buildah/install.md b/vendor/github.com/containers/buildah/install.md
index 4bd6aa821..ce9b38030 100644
--- a/vendor/github.com/containers/buildah/install.md
+++ b/vendor/github.com/containers/buildah/install.md
@@ -23,11 +23,11 @@ sudo yum -y install buildah
#### [Debian](https://debian.org)
The buildah package is available in
-the [Bullseye (testing) branch](https://packages.debian.org/bullseye/buildah), which
-will be the next stable release (Debian 11) as well as Debian Unstable/Sid.
+the [Bullseye](https://packages.debian.org/bullseye/buildah), which
+is the current stable release (Debian 11), as well as Debian Unstable/Sid.
```bash
-# Debian Testing/Bullseye or Unstable/Sid
+# Debian Stable/Bullseye or Unstable/Sid
sudo apt-get update
sudo apt-get -y install buildah
```
diff --git a/vendor/github.com/containers/buildah/internal/parse/parse.go b/vendor/github.com/containers/buildah/internal/parse/parse.go
index f0ea3c820..23af9aeb0 100644
--- a/vendor/github.com/containers/buildah/internal/parse/parse.go
+++ b/vendor/github.com/containers/buildah/internal/parse/parse.go
@@ -59,6 +59,9 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st
for _, val := range args {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
+ case "type":
+ // This is already processed
+ continue
case "bind-nonrecursive":
newMount.Options = append(newMount.Options, "bind")
bindNonRecursive = true
@@ -209,6 +212,9 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a
for _, val := range args {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
+ case "type":
+ // This is already processed
+ continue
case "nosuid", "nodev", "noexec":
// TODO: detect duplication of these options.
// (Is this necessary?)
@@ -497,6 +503,8 @@ func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string,
// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ...
// buildah run --mount type=tmpfs,target=/dev/shm ...
func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string) (map[string]specs.Mount, []string, []string, error) {
+ // If `type` is not set default to "bind"
+ mountType := TypeBind
finalMounts := make(map[string]specs.Mount)
mountedImages := make([]string, 0)
lockedTargets := make([]string, 0)
@@ -507,19 +515,20 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c
// to allow a more robust parsing of the mount format and to give
// precise errors regarding supported format versus supported options.
for _, mount := range mounts {
- arr := strings.SplitN(mount, ",", 2)
- if len(arr) < 2 {
+ tokens := strings.Split(mount, ",")
+ if len(tokens) < 2 {
return nil, mountedImages, lockedTargets, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
}
- kv := strings.Split(arr[0], "=")
- // TODO: type is not explicitly required in Docker.
- // If not specified, it defaults to "volume".
- if len(kv) != 2 || kv[0] != "type" {
- return nil, mountedImages, lockedTargets, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
+ for _, field := range tokens {
+ if strings.HasPrefix(field, "type=") {
+ kv := strings.Split(field, "=")
+ if len(kv) != 2 {
+ return nil, mountedImages, lockedTargets, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
+ }
+ mountType = kv[1]
+ }
}
-
- tokens := strings.Split(arr[1], ",")
- switch kv[1] {
+ switch mountType {
case TypeBind:
mount, image, err := GetBindMount(ctx, tokens, contextDir, store, "", nil)
if err != nil {
@@ -550,7 +559,7 @@ func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, c
}
finalMounts[mount.Destination] = mount
default:
- return nil, mountedImages, lockedTargets, fmt.Errorf("invalid filesystem type %q", kv[1])
+ return nil, mountedImages, lockedTargets, fmt.Errorf("invalid filesystem type %q", mountType)
}
}
@@ -569,6 +578,9 @@ func GetTmpfsMount(args []string) (specs.Mount, error) {
for _, val := range args {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
+ case "type":
+ // This is already processed
+ continue
case "ro", "nosuid", "nodev", "noexec":
newMount.Options = append(newMount.Options, kv[0])
case "readonly":
diff --git a/vendor/github.com/containers/buildah/mount.go b/vendor/github.com/containers/buildah/mount.go
index 3b1ff5820..932c1bb78 100644
--- a/vendor/github.com/containers/buildah/mount.go
+++ b/vendor/github.com/containers/buildah/mount.go
@@ -7,13 +7,13 @@ import "fmt"
func (b *Builder) Mount(label string) (string, error) {
mountpoint, err := b.store.Mount(b.ContainerID, label)
if err != nil {
- return "", fmt.Errorf("error mounting build container %q: %w", b.ContainerID, err)
+ return "", fmt.Errorf("mounting build container %q: %w", b.ContainerID, err)
}
b.MountPoint = mountpoint
err = b.Save()
if err != nil {
- return "", fmt.Errorf("error saving updated state for build container %q: %w", b.ContainerID, err)
+ return "", fmt.Errorf("saving updated state for build container %q: %w", b.ContainerID, err)
}
return mountpoint, nil
}
@@ -21,7 +21,7 @@ func (b *Builder) Mount(label string) (string, error) {
func (b *Builder) setMountPoint(mountPoint string) error {
b.MountPoint = mountPoint
if err := b.Save(); err != nil {
- return fmt.Errorf("error saving updated state for build container %q: %w", b.ContainerID, err)
+ return fmt.Errorf("saving updated state for build container %q: %w", b.ContainerID, err)
}
return nil
}
@@ -30,17 +30,17 @@ func (b *Builder) setMountPoint(mountPoint string) error {
func (b *Builder) Mounted() (bool, error) {
mountCnt, err := b.store.Mounted(b.ContainerID)
if err != nil {
- return false, fmt.Errorf("error determining if mounting build container %q is mounted: %w", b.ContainerID, err)
+ return false, fmt.Errorf("determining if mounting build container %q is mounted: %w", b.ContainerID, err)
}
mounted := mountCnt > 0
if mounted && b.MountPoint == "" {
ctr, err := b.store.Container(b.ContainerID)
if err != nil {
- return mountCnt > 0, fmt.Errorf("error determining if mounting build container %q is mounted: %w", b.ContainerID, err)
+ return mountCnt > 0, fmt.Errorf("determining if mounting build container %q is mounted: %w", b.ContainerID, err)
}
layer, err := b.store.Layer(ctr.LayerID)
if err != nil {
- return mountCnt > 0, fmt.Errorf("error determining if mounting build container %q is mounted: %w", b.ContainerID, err)
+ return mountCnt > 0, fmt.Errorf("determining if mounting build container %q is mounted: %w", b.ContainerID, err)
}
return mounted, b.setMountPoint(layer.MountPoint)
}
diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go
index 0ebda161b..11f0933bc 100644
--- a/vendor/github.com/containers/buildah/new.go
+++ b/vendor/github.com/containers/buildah/new.go
@@ -190,12 +190,12 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
if ref != nil {
srcSrc, err := ref.NewImageSource(ctx, systemContext)
if err != nil {
- return nil, fmt.Errorf("error instantiating image for %q: %w", transports.ImageName(ref), err)
+ return nil, fmt.Errorf("instantiating image for %q: %w", transports.ImageName(ref), err)
}
defer srcSrc.Close()
manifestBytes, manifestType, err := srcSrc.GetManifest(ctx, nil)
if err != nil {
- return nil, fmt.Errorf("error loading image manifest for %q: %w", transports.ImageName(ref), err)
+ return nil, fmt.Errorf("loading image manifest for %q: %w", transports.ImageName(ref), err)
}
if manifestDigest, err := manifest.Digest(manifestBytes); err == nil {
imageDigest = manifestDigest.String()
@@ -204,17 +204,17 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
if manifest.MIMETypeIsMultiImage(manifestType) {
list, err := manifest.ListFromBlob(manifestBytes, manifestType)
if err != nil {
- return nil, fmt.Errorf("error parsing image manifest for %q as list: %w", transports.ImageName(ref), err)
+ return nil, fmt.Errorf("parsing image manifest for %q as list: %w", transports.ImageName(ref), err)
}
instance, err := list.ChooseInstance(systemContext)
if err != nil {
- return nil, fmt.Errorf("error finding an appropriate image in manifest list %q: %w", transports.ImageName(ref), err)
+ return nil, fmt.Errorf("finding an appropriate image in manifest list %q: %w", transports.ImageName(ref), err)
}
instanceDigest = &instance
}
src, err = image.FromUnparsedImage(ctx, systemContext, image.UnparsedInstance(srcSrc, instanceDigest))
if err != nil {
- return nil, fmt.Errorf("error instantiating image for %q instance %q: %w", transports.ImageName(ref), instanceDigest, err)
+ return nil, fmt.Errorf("instantiating image for %q instance %q: %w", transports.ImageName(ref), instanceDigest, err)
}
}
@@ -263,7 +263,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
break
}
if !errors.Is(err, storage.ErrDuplicateName) || options.Container != "" {
- return nil, fmt.Errorf("error creating container: %w", err)
+ return nil, fmt.Errorf("creating container: %w", err)
}
tmpName = fmt.Sprintf("%s-%d", name, rand.Int()%conflict)
conflict = conflict * 10
@@ -333,16 +333,16 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
if options.Mount {
_, err = builder.Mount(container.MountLabel())
if err != nil {
- return nil, fmt.Errorf("error mounting build container %q: %w", builder.ContainerID, err)
+ return nil, fmt.Errorf("mounting build container %q: %w", builder.ContainerID, err)
}
}
if err := builder.initConfig(ctx, src, systemContext); err != nil {
- return nil, fmt.Errorf("error preparing image configuration: %w", err)
+ return nil, fmt.Errorf("preparing image configuration: %w", err)
}
err = builder.Save()
if err != nil {
- return nil, fmt.Errorf("error saving builder state for container %q: %w", builder.ContainerID, err)
+ return nil, fmt.Errorf("saving builder state for container %q: %w", builder.ContainerID, err)
}
return builder, nil
diff --git a/vendor/github.com/containers/buildah/pkg/chrootuser/user.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user.go
index 9fffc6d70..4614ecf90 100644
--- a/vendor/github.com/containers/buildah/pkg/chrootuser/user.go
+++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user.go
@@ -76,9 +76,9 @@ func GetUser(rootdir, userspec string) (uint32, uint32, string, error) {
return uint32(uid64), uint32(gid64), homedir, nil
}
- err = fmt.Errorf("error determining run uid: %w", uerr)
+ err = fmt.Errorf("determining run uid: %w", uerr)
if uerr == nil {
- err = fmt.Errorf("error determining run gid: %w", gerr)
+ err = fmt.Errorf("determining run gid: %w", gerr)
}
return 0, 0, homedir, err
@@ -94,7 +94,7 @@ func GetGroup(rootdir, groupspec string) (uint32, error) {
gid64, gerr = lookupGroupInContainer(rootdir, groupspec)
}
if gerr != nil {
- return 0, fmt.Errorf("error looking up group for gid %q: %w", groupspec, gerr)
+ return 0, fmt.Errorf("looking up group for gid %q: %w", groupspec, gerr)
}
return uint32(gid64), nil
}
@@ -103,7 +103,7 @@ func GetGroup(rootdir, groupspec string) (uint32, error) {
func GetAdditionalGroupsForUser(rootdir string, userid uint64) ([]uint32, error) {
gids, err := lookupAdditionalGroupsForUIDInContainer(rootdir, userid)
if err != nil {
- return nil, fmt.Errorf("error looking up supplemental groups for uid %d: %w", userid, err)
+ return nil, fmt.Errorf("looking up supplemental groups for uid %d: %w", userid, err)
}
return gids, nil
}
diff --git a/vendor/github.com/containers/buildah/pkg/cli/build.go b/vendor/github.com/containers/buildah/pkg/cli/build.go
index 4ff104a4b..99a78d853 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/build.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/build.go
@@ -20,6 +20,8 @@ import (
"github.com/containers/buildah/pkg/util"
"github.com/containers/common/pkg/auth"
"github.com/containers/image/v5/docker/reference"
+ "github.com/containers/image/v5/types"
+ "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -47,6 +49,18 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
output := ""
cleanTmpFile := false
tags := []string{}
+ if iopts.Network == "none" {
+ if c.Flag("dns").Changed {
+ return options, nil, nil, errors.New("the --dns option cannot be used with --network=none")
+ }
+ if c.Flag("dns-option").Changed {
+ return options, nil, nil, errors.New("the --dns-option option cannot be used with --network=none")
+ }
+ if c.Flag("dns-search").Changed {
+ return options, nil, nil, errors.New("the --dns-search option cannot be used with --network=none")
+ }
+
+ }
if c.Flag("tag").Changed {
tags = iopts.Tag
if len(tags) > 0 {
@@ -147,7 +161,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
// The context directory could be a URL. Try to handle that.
tempDir, subDir, err := define.TempDirForURL("", "buildah", cliArgs[0])
if err != nil {
- return options, nil, nil, fmt.Errorf("error prepping temporary context directory: %w", err)
+ return options, nil, nil, fmt.Errorf("prepping temporary context directory: %w", err)
}
if tempDir != "" {
// We had to download it to a temporary directory.
@@ -158,7 +172,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
// Nope, it was local. Use it as is.
absDir, err := filepath.Abs(cliArgs[0])
if err != nil {
- return options, nil, nil, fmt.Errorf("error determining path to directory: %w", err)
+ return options, nil, nil, fmt.Errorf("determining path to directory: %w", err)
}
contextDir = absDir
}
@@ -176,7 +190,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
contextDir, err = filepath.EvalSymlinks(contextDir)
if err != nil {
- return options, nil, nil, fmt.Errorf("error evaluating symlinks in build context path: %w", err)
+ return options, nil, nil, fmt.Errorf("evaluating symlinks in build context path: %w", err)
}
var stdin io.Reader
@@ -197,7 +211,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
systemContext, err := parse.SystemContextFromOptions(c)
if err != nil {
- return options, nil, nil, fmt.Errorf("error building system context: %w", err)
+ return options, nil, nil, fmt.Errorf("building system context: %w", err)
}
isolation, err := parse.IsolationOption(iopts.Isolation)
@@ -253,7 +267,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
}
usernsOption, idmappingOptions, err := parse.IDMappingOptions(c, isolation)
if err != nil {
- return options, nil, nil, fmt.Errorf("error parsing ID mapping options: %w", err)
+ return options, nil, nil, fmt.Errorf("parsing ID mapping options: %w", err)
}
namespaceOptions.AddOrReplace(usernsOption...)
@@ -269,7 +283,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
var excludes []string
if iopts.IgnoreFile != "" {
- if excludes, _, err = parse.ContainerIgnoreFile(contextDir, iopts.IgnoreFile); err != nil {
+ if excludes, _, err = parse.ContainerIgnoreFile(contextDir, iopts.IgnoreFile, containerfiles); err != nil {
return options, nil, nil, err
}
}
@@ -309,6 +323,18 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
if err != nil {
return options, nil, nil, fmt.Errorf("unable to parse value provided %q as --cache-ttl: %w", iopts.CacheTTL, err)
}
+ // If user explicitly specified `--cache-ttl=0s`
+ // it would effectively mean that user is asking
+ // to use no cache at all. In such use cases
+ // buildah can skip looking for cache entirely
+ // by setting `--no-cache=true` internally.
+ if int64(cacheTTL) == 0 {
+ logrus.Debug("Setting --no-cache=true since --cache-ttl was set to 0s which effectively means user wants to ignore cache")
+ if c.Flag("no-cache").Changed && !iopts.NoCache {
+ return options, nil, nil, fmt.Errorf("cannot use --cache-ttl with duration as 0 and --no-cache=false")
+ }
+ iopts.NoCache = true
+ }
}
var pullPushRetryDelay time.Duration
pullPushRetryDelay, err = time.ParseDuration(iopts.RetryDelay)
@@ -318,6 +344,16 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
// Following log line is used in integration test.
logrus.Debugf("Setting MaxPullPushRetries to %d and PullPushRetryDelay to %v", iopts.Retry, pullPushRetryDelay)
+ if c.Flag("network").Changed && c.Flag("isolation").Changed {
+ if isolation == define.IsolationChroot {
+ if ns := namespaceOptions.Find(string(specs.NetworkNamespace)); ns != nil {
+ if !ns.Host {
+ return options, nil, nil, fmt.Errorf("cannot set --network other than host with --isolation %s", c.Flag("isolation").Value.String())
+ }
+ }
+ }
+ }
+
options = define.BuildOptions{
AddCapabilities: iopts.CapAdd,
AdditionalBuildContexts: additionalBuildContext,
@@ -378,6 +414,7 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) (
RusageLogFile: iopts.RusageLogFile,
SignBy: iopts.SignBy,
SignaturePolicyPath: iopts.SignaturePolicy,
+ SkipUnusedStages: types.NewOptionalBool(iopts.SkipUnusedStages),
Squash: iopts.Squash,
SystemContext: systemContext,
Target: iopts.Target,
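GenBuildOptions above gains several up-front flag validations, for example rejecting DNS options when --network=none, all keyed off cobra's Flag(...).Changed. A minimal sketch of that pattern with a toy command (the flag names mirror the diff; everything else is illustrative):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// DNS-related options only make sense when the build has a network, so an
// explicit --dns* flag is rejected up front when --network=none was given.
func main() {
	var network string
	cmd := &cobra.Command{
		Use:           "build",
		SilenceUsage:  true,
		SilenceErrors: true,
		RunE: func(c *cobra.Command, args []string) error {
			if network == "none" {
				for _, name := range []string{"dns", "dns-option", "dns-search"} {
					if c.Flag(name).Changed {
						return fmt.Errorf("the --%s option cannot be used with --network=none", name)
					}
				}
			}
			fmt.Println("flags accepted")
			return nil
		},
	}
	cmd.Flags().StringVar(&network, "network", "", "network mode")
	cmd.Flags().StringSlice("dns", nil, "DNS servers")
	cmd.Flags().StringSlice("dns-option", nil, "DNS options")
	cmd.Flags().StringSlice("dns-search", nil, "DNS search domains")

	cmd.SetArgs([]string{"--network=none", "--dns=1.1.1.1"})
	if err := cmd.Execute(); err != nil {
		fmt.Println("rejected:", err)
	}
}
```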
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index 00b6bd33b..7f42210b5 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -89,6 +89,7 @@ type BudResults struct {
SignaturePolicy string
SignBy string
Squash bool
+ SkipUnusedStages bool
Stdin bool
Tag []string
BuildOutput string
@@ -260,6 +261,7 @@ func GetBudFlags(flags *BudResults) pflag.FlagSet {
if err := fs.MarkHidden("signature-policy"); err != nil {
panic(fmt.Sprintf("error marking the signature-policy flag as hidden: %v", err))
}
+ fs.BoolVar(&flags.SkipUnusedStages, "skip-unused-stages", true, "skips stages in multi-stage builds which do not affect the final target")
fs.BoolVar(&flags.Squash, "squash", false, "squash newly built layers into a single new layer")
fs.StringArrayVar(&flags.SSH, "ssh", []string{}, "SSH agent socket or keys to expose to the build. (format: default|<id>[=<socket>|<key>[,<key>]])")
fs.BoolVar(&flags.Stdin, "stdin", false, "pass stdin into containers")
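The new --skip-unused-stages flag registered above is a plain bool that defaults to true; GenBuildOptions then converts it with types.NewOptionalBool, so only an explicit =false changes the build. A small pflag sketch of that default-true registration (a standalone flag set for illustration, not the real GetBudFlags wiring):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("build", pflag.ContinueOnError)
	// Defaults to true, so skipping unused stages stays the normal behaviour.
	skip := fs.Bool("skip-unused-stages", true,
		"skips stages in multi-stage builds which do not affect the final target")

	if err := fs.Parse([]string{"--skip-unused-stages=false"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println("skip unused stages:", *skip,
		"explicitly set:", fs.Changed("skip-unused-stages"))
}
```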
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
index bbbfb2fc2..b3f260357 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -18,6 +18,7 @@ import (
"github.com/containers/buildah/define"
internalParse "github.com/containers/buildah/internal/parse"
"github.com/containers/buildah/pkg/sshagent"
+ "github.com/containers/common/pkg/config"
"github.com/containers/common/pkg/parse"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
@@ -35,9 +36,9 @@ import (
const (
// SeccompDefaultPath defines the default seccomp path
- SeccompDefaultPath = "/usr/share/containers/seccomp.json"
+ SeccompDefaultPath = config.SeccompDefaultPath
// SeccompOverridePath if this exists it overrides the default seccomp path
- SeccompOverridePath = "/etc/crio/seccomp.json"
+ SeccompOverridePath = config.SeccompOverridePath
// TypeBind is the type for mounting host dir
TypeBind = "bind"
// TypeTmpfs is the type for mounting tmpfs
@@ -811,15 +812,15 @@ func parseIDMap(spec []string) (m [][3]uint32, err error) {
for len(args) >= 3 {
cid, err := strconv.ParseUint(args[0], 10, 32)
if err != nil {
- return nil, fmt.Errorf("error parsing container ID %q from mapping %q as a number: %w", args[0], s, err)
+ return nil, fmt.Errorf("parsing container ID %q from mapping %q as a number: %w", args[0], s, err)
}
hostid, err := strconv.ParseUint(args[1], 10, 32)
if err != nil {
- return nil, fmt.Errorf("error parsing host ID %q from mapping %q as a number: %w", args[1], s, err)
+ return nil, fmt.Errorf("parsing host ID %q from mapping %q as a number: %w", args[1], s, err)
}
size, err := strconv.ParseUint(args[2], 10, 32)
if err != nil {
- return nil, fmt.Errorf("error parsing %q from mapping %q as a number: %w", args[2], s, err)
+ return nil, fmt.Errorf("parsing %q from mapping %q as a number: %w", args[2], s, err)
}
m = append(m, [3]uint32{uint32(cid), uint32(hostid), uint32(size)})
args = args[3:]
@@ -1074,11 +1075,32 @@ func SSH(sshSources []string) (map[string]*sshagent.Source, error) {
return parsed, nil
}
-func ContainerIgnoreFile(contextDir, path string) ([]string, string, error) {
+// ContainerIgnoreFile consumes path to `dockerignore` or `containerignore`
+// and returns list of files to exclude along with the path to processed ignore
+// file. Deprecated since this might become internal only, please avoid relying
+// on this function.
+func ContainerIgnoreFile(contextDir, path string, containerFiles []string) ([]string, string, error) {
if path != "" {
excludes, err := imagebuilder.ParseIgnore(path)
return excludes, path, err
}
+ // If path was not supplied give priority to `<containerfile>.containerignore` first.
+ for _, containerfile := range containerFiles {
+ if !filepath.IsAbs(containerfile) {
+ containerfile = filepath.Join(contextDir, containerfile)
+ }
+ containerfileIgnore := ""
+ if _, err := os.Stat(containerfile + ".containerignore"); err == nil {
+ containerfileIgnore = containerfile + ".containerignore"
+ }
+ if _, err := os.Stat(containerfile + ".dockerignore"); err == nil {
+ containerfileIgnore = containerfile + ".dockerignore"
+ }
+ if containerfileIgnore != "" {
+ excludes, err := imagebuilder.ParseIgnore(containerfileIgnore)
+ return excludes, containerfileIgnore, err
+ }
+ }
path = filepath.Join(contextDir, ".containerignore")
excludes, err := imagebuilder.ParseIgnore(path)
if errors.Is(err, os.ErrNotExist) {
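The ContainerIgnoreFile hunks above add a lookup order: an explicit path wins, then a .containerignore or .dockerignore file named after each Containerfile, then the context directory's .containerignore (the function continues with a .dockerignore fallback not shown in this hunk). A hedged sketch of that resolution (resolveIgnoreFile is a hypothetical helper that only returns the chosen path, not the parsed excludes):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolveIgnoreFile picks the ignore file the same way the new
// ContainerIgnoreFile does: explicit path first, then per-Containerfile
// ignore files, then the context directory's .containerignore.
func resolveIgnoreFile(contextDir, path string, containerFiles []string) string {
	if path != "" {
		return path
	}
	for _, cf := range containerFiles {
		if !filepath.IsAbs(cf) {
			cf = filepath.Join(contextDir, cf)
		}
		candidate := ""
		if _, err := os.Stat(cf + ".containerignore"); err == nil {
			candidate = cf + ".containerignore"
		}
		if _, err := os.Stat(cf + ".dockerignore"); err == nil {
			candidate = cf + ".dockerignore"
		}
		if candidate != "" {
			return candidate
		}
	}
	return filepath.Join(contextDir, ".containerignore")
}

func main() {
	dir, _ := os.MkdirTemp("", "ignore-demo")
	defer os.RemoveAll(dir)
	os.WriteFile(filepath.Join(dir, "Containerfile.containerignore"), []byte("*.log\n"), 0o644)
	fmt.Println(resolveIgnoreFile(dir, "", []string{"Containerfile"}))
}
```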
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go
index a8b1d1a9a..ff8ce854e 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse_unix.go
@@ -20,7 +20,7 @@ func DeviceFromPath(device string) (define.ContainerDevices, error) {
}
srcInfo, err := os.Stat(src)
if err != nil {
- return nil, fmt.Errorf("error getting info of source device %s: %w", src, err)
+ return nil, fmt.Errorf("getting info of source device %s: %w", src, err)
}
if !srcInfo.IsDir() {
@@ -37,7 +37,7 @@ func DeviceFromPath(device string) (define.ContainerDevices, error) {
// If source device is a directory
srcDevices, err := devices.GetDevices(src)
if err != nil {
- return nil, fmt.Errorf("error getting source devices from directory %s: %w", src, err)
+ return nil, fmt.Errorf("getting source devices from directory %s: %w", src, err)
}
for _, d := range srcDevices {
d.Path = filepath.Join(dst, filepath.Base(d.Path))
diff --git a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go
index e0b9d37b3..317046fc3 100644
--- a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go
+++ b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unix.go
@@ -17,7 +17,7 @@ func get() (Rusage, error) {
var rusage syscall.Rusage
err := syscall.Getrusage(syscall.RUSAGE_CHILDREN, &rusage)
if err != nil {
- return Rusage{}, fmt.Errorf("error getting resource usage: %w", err)
+ return Rusage{}, fmt.Errorf("getting resource usage: %w", err)
}
r := Rusage{
Date: time.Now(),
diff --git a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go
index 46dd5ebe7..54ed77fad 100644
--- a/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go
+++ b/vendor/github.com/containers/buildah/pkg/rusage/rusage_unsupported.go
@@ -9,7 +9,7 @@ import (
)
func get() (Rusage, error) {
- return Rusage{}, fmt.Errorf("error getting resource usage: %w", syscall.ENOTSUP)
+ return Rusage{}, fmt.Errorf("getting resource usage: %w", syscall.ENOTSUP)
}
// Supported returns true if resource usage counters are supported on this OS.
diff --git a/vendor/github.com/containers/buildah/push.go b/vendor/github.com/containers/buildah/push.go
index a161bb279..1f4439147 100644
--- a/vendor/github.com/containers/buildah/push.go
+++ b/vendor/github.com/containers/buildah/push.go
@@ -32,7 +32,7 @@ func cacheLookupReferenceFunc(directory string, compress types.LayerCompression)
}
ref, err := blobcache.NewBlobCache(ref, directory, compress)
if err != nil {
- return nil, fmt.Errorf("error using blobcache %q: %w", directory, err)
+ return nil, fmt.Errorf("using blobcache %q: %w", directory, err)
}
return ref, nil
}
@@ -135,7 +135,7 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
manifestDigest, err := manifest.Digest(manifestBytes)
if err != nil {
- return nil, "", fmt.Errorf("error computing digest of manifest of new image %q: %w", transports.ImageName(dest), err)
+ return nil, "", fmt.Errorf("computing digest of manifest of new image %q: %w", transports.ImageName(dest), err)
}
var ref reference.Canonical
diff --git a/vendor/github.com/containers/buildah/run_common.go b/vendor/github.com/containers/buildah/run_common.go
index f5a5ec850..6bd20d8df 100644
--- a/vendor/github.com/containers/buildah/run_common.go
+++ b/vendor/github.com/containers/buildah/run_common.go
@@ -98,7 +98,7 @@ func (b *Builder) addResolvConf(rdir string, chownOpts *idtools.IDPair, dnsServe
Searches: searches,
Options: options,
}); err != nil {
- return "", fmt.Errorf("error building resolv.conf for container %s: %w", b.ContainerID, err)
+ return "", fmt.Errorf("building resolv.conf for container %s: %w", b.ContainerID, err)
}
uid := 0
@@ -165,7 +165,7 @@ func (b *Builder) generateHostname(rdir, hostname string, chownOpts *idtools.IDP
cfile := filepath.Join(rdir, filepath.Base(hostnamePath))
if err = ioutils.AtomicWriteFile(cfile, hostnameBuffer.Bytes(), 0644); err != nil {
- return "", fmt.Errorf("error writing /etc/hostname into the container: %w", err)
+ return "", fmt.Errorf("writing /etc/hostname into the container: %w", err)
}
uid := 0
@@ -419,10 +419,10 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs [
// Write the runtime configuration.
specbytes, err := json.Marshal(spec)
if err != nil {
- return 1, fmt.Errorf("error encoding configuration %#v as json: %w", spec, err)
+ return 1, fmt.Errorf("encoding configuration %#v as json: %w", spec, err)
}
if err = ioutils.AtomicWriteFile(filepath.Join(bundlePath, "config.json"), specbytes, 0600); err != nil {
- return 1, fmt.Errorf("error storing runtime configuration: %w", err)
+ return 1, fmt.Errorf("storing runtime configuration: %w", err)
}
logrus.Debugf("config = %v", string(specbytes))
@@ -451,7 +451,7 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs [
copyPipes := false
finishCopy := make([]int, 2)
if err = unix.Pipe(finishCopy); err != nil {
- return 1, fmt.Errorf("error creating pipe for notifying to stop stdio: %w", err)
+ return 1, fmt.Errorf("creating pipe for notifying to stop stdio: %w", err)
}
finishedCopy := make(chan struct{}, 1)
var pargs []string
@@ -463,7 +463,7 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs [
socketPath := filepath.Join(bundlePath, "console.sock")
consoleListener, err = net.ListenUnix("unix", &net.UnixAddr{Name: socketPath, Net: "unix"})
if err != nil {
- return 1, fmt.Errorf("error creating socket %q to receive terminal descriptor: %w", consoleListener.Addr(), err)
+ return 1, fmt.Errorf("creating socket %q to receive terminal descriptor: %w", consoleListener.Addr(), err)
}
// Add console socket arguments.
moreCreateArgs = append(moreCreateArgs, "--console-socket", socketPath)
@@ -542,13 +542,13 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs [
logrus.Debugf("Running %q", create.Args)
err = create.Run()
if err != nil {
- return 1, fmt.Errorf("error from %s creating container for %v: %s: %w", runtime, pargs, runCollectOutput(options.Logger, errorFds, closeBeforeReadingErrorFds), err)
+ return 1, fmt.Errorf("from %s creating container for %v: %s: %w", runtime, pargs, runCollectOutput(options.Logger, errorFds, closeBeforeReadingErrorFds), err)
}
defer func() {
err2 := del.Run()
if err2 != nil {
if err == nil {
- err = fmt.Errorf("error deleting container: %w", err2)
+ err = fmt.Errorf("deleting container: %w", err2)
} else {
options.Logger.Infof("error from %s deleting container: %v", runtime, err2)
}
@@ -562,7 +562,7 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs [
}
pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue)))
if err != nil {
- return 1, fmt.Errorf("error parsing pid %s as a number: %w", string(pidValue), err)
+ return 1, fmt.Errorf("parsing pid %s as a number: %w", string(pidValue), err)
}
var stopped uint32
var reaping sync.WaitGroup
@@ -608,7 +608,7 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs [
logrus.Debugf("Running %q", start.Args)
err = start.Run()
if err != nil {
- return 1, fmt.Errorf("error from %s starting container: %w", runtime, err)
+ return 1, fmt.Errorf("from %s starting container: %w", runtime, err)
}
defer func() {
if atomic.LoadUint32(&stopped) == 0 {
@@ -642,10 +642,10 @@ func runUsingRuntime(options RunOptions, configureNetwork bool, moreCreateArgs [
// container exited
break
}
- return 1, fmt.Errorf("error reading container state from %s (got output: %q): %w", runtime, string(stateOutput), err)
+ return 1, fmt.Errorf("reading container state from %s (got output: %q): %w", runtime, string(stateOutput), err)
}
if err = json.Unmarshal(stateOutput, &state); err != nil {
- return 1, fmt.Errorf("error parsing container state %q from %s: %w", string(stateOutput), runtime, err)
+ return 1, fmt.Errorf("parsing container state %q from %s: %w", string(stateOutput), runtime, err)
}
switch state.Status {
case "running":
@@ -964,7 +964,7 @@ func runAcceptTerminal(logger *logrus.Logger, consoleListener *net.UnixListener,
defer consoleListener.Close()
c, err := consoleListener.AcceptUnix()
if err != nil {
- return -1, fmt.Errorf("error accepting socket descriptor connection: %w", err)
+ return -1, fmt.Errorf("accepting socket descriptor connection: %w", err)
}
defer c.Close()
// Expect a control message over our new connection.
@@ -972,7 +972,7 @@ func runAcceptTerminal(logger *logrus.Logger, consoleListener *net.UnixListener,
oob := make([]byte, 8192)
n, oobn, _, _, err := c.ReadMsgUnix(b, oob)
if err != nil {
- return -1, fmt.Errorf("error reading socket descriptor: %w", err)
+ return -1, fmt.Errorf("reading socket descriptor: %w", err)
}
if n > 0 {
logrus.Debugf("socket descriptor is for %q", string(b[:n]))
@@ -983,7 +983,7 @@ func runAcceptTerminal(logger *logrus.Logger, consoleListener *net.UnixListener,
// Parse the control message.
scm, err := unix.ParseSocketControlMessage(oob[:oobn])
if err != nil {
- return -1, fmt.Errorf("error parsing out-of-bound data as a socket control message: %w", err)
+ return -1, fmt.Errorf("parsing out-of-bound data as a socket control message: %w", err)
}
logrus.Debugf("control messages: %v", scm)
// Expect to get a descriptor.
@@ -991,7 +991,7 @@ func runAcceptTerminal(logger *logrus.Logger, consoleListener *net.UnixListener,
for i := range scm {
fds, err := unix.ParseUnixRights(&scm[i])
if err != nil {
- return -1, fmt.Errorf("error parsing unix rights control message: %v: %w", &scm[i], err)
+ return -1, fmt.Errorf("parsing unix rights control message: %v: %w", &scm[i], err)
}
logrus.Debugf("fds: %v", fds)
if len(fds) == 0 {
@@ -1106,7 +1106,7 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
Isolation: isolation,
})
if conferr != nil {
- return fmt.Errorf("error encoding configuration for %q: %w", runUsingRuntimeCommand, conferr)
+ return fmt.Errorf("encoding configuration for %q: %w", runUsingRuntimeCommand, conferr)
}
cmd := reexec.Command(runUsingRuntimeCommand)
setPdeathsig(cmd)
@@ -1126,13 +1126,13 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
cmd.Env = util.MergeEnv(os.Environ(), []string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())})
preader, pwriter, err := os.Pipe()
if err != nil {
- return fmt.Errorf("error creating configuration pipe: %w", err)
+ return fmt.Errorf("creating configuration pipe: %w", err)
}
confwg.Add(1)
go func() {
_, conferr = io.Copy(pwriter, bytes.NewReader(config))
if conferr != nil {
- conferr = fmt.Errorf("error while copying configuration down pipe to child process: %w", conferr)
+ conferr = fmt.Errorf("while copying configuration down pipe to child process: %w", conferr)
}
confwg.Done()
}()
@@ -1143,14 +1143,14 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
if configureNetwork {
containerCreateR.file, containerCreateW.file, err = os.Pipe()
if err != nil {
- return fmt.Errorf("error creating container create pipe: %w", err)
+ return fmt.Errorf("creating container create pipe: %w", err)
}
defer containerCreateR.Close()
defer containerCreateW.Close()
containerStartR.file, containerStartW.file, err = os.Pipe()
if err != nil {
- return fmt.Errorf("error creating container start pipe: %w", err)
+ return fmt.Errorf("creating container start pipe: %w", err)
}
defer containerStartR.Close()
defer containerStartW.Close()
@@ -1161,7 +1161,7 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
defer preader.Close()
defer pwriter.Close()
if err := cmd.Start(); err != nil {
- return fmt.Errorf("error while starting runtime: %w", err)
+ return fmt.Errorf("while starting runtime: %w", err)
}
interrupted := make(chan os.Signal, 100)
@@ -1191,7 +1191,7 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
}
pid, err := strconv.Atoi(strings.TrimSpace(string(pidValue)))
if err != nil {
- return fmt.Errorf("error parsing pid %s as a number: %w", string(pidValue), err)
+ return fmt.Errorf("parsing pid %s as a number: %w", string(pidValue), err)
}
teardown, netstatus, err := b.runConfigureNetwork(pid, isolation, options, configureNetworks, containerName)
@@ -1227,7 +1227,7 @@ func (b *Builder) runUsingRuntimeSubproc(isolation define.Isolation, options Run
}
if err := cmd.Wait(); err != nil {
- return fmt.Errorf("error while running runtime: %w", err)
+ return fmt.Errorf("while running runtime: %w", err)
}
confwg.Wait()
signal.Stop(interrupted)
@@ -1280,7 +1280,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
// After this point we need to know the per-container persistent storage directory.
cdir, err := b.store.ContainerDirectory(b.ContainerID)
if err != nil {
- return nil, fmt.Errorf("error determining work directory for container %q: %w", b.ContainerID, err)
+ return nil, fmt.Errorf("determining work directory for container %q: %w", b.ContainerID, err)
}
// Figure out which UID and GID to tell the subscriptions package to use
@@ -1408,7 +1408,7 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin
}
logrus.Debugf("populating directory %q for volume %q using contents of %q", volumePath, volume, srcPath)
if err = extractWithTar(mountPoint, srcPath, volumePath); err != nil && !errors.Is(err, os.ErrNotExist) {
- return nil, fmt.Errorf("error populating directory %q for volume %q using contents of %q: %w", volumePath, volume, srcPath, err)
+ return nil, fmt.Errorf("populating directory %q for volume %q using contents of %q: %w", volumePath, volume, srcPath, err)
}
}
// Add the bind mount.
@@ -1445,6 +1445,8 @@ func cleanableDestinationListFromMounts(mounts []spec.Mount) []string {
// runSetupRunMounts sets up mounts that exist only in this RUN, not in subsequent runs
func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMaps IDMaps) ([]spec.Mount, *runMountArtifacts, error) {
+ // If `type` is not set default to "bind"
+ mountType := internalParse.TypeBind
mountTargets := make([]string, 0, 10)
tmpFiles := make([]string, 0, len(mounts))
mountImages := make([]string, 0, 10)
@@ -1452,20 +1454,19 @@ func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMap
agents := make([]*sshagent.AgentServer, 0, len(mounts))
sshCount := 0
defaultSSHSock := ""
- tokens := []string{}
lockedTargets := []string{}
for _, mount := range mounts {
- arr := strings.SplitN(mount, ",", 2)
-
- kv := strings.Split(arr[0], "=")
- if len(kv) != 2 || kv[0] != "type" {
- return nil, nil, errors.New("invalid mount type")
- }
- if len(arr) == 2 {
- tokens = strings.Split(arr[1], ",")
+ tokens := strings.Split(mount, ",")
+ for _, field := range tokens {
+ if strings.HasPrefix(field, "type=") {
+ kv := strings.Split(field, "=")
+ if len(kv) != 2 {
+ return nil, nil, errors.New("invalid mount type")
+ }
+ mountType = kv[1]
+ }
}
-
- switch kv[1] {
+ switch mountType {
case "secret":
mount, envFile, err := b.getSecretMount(tokens, sources.Secrets, idMaps)
if err != nil {
@@ -1520,7 +1521,7 @@ func (b *Builder) runSetupRunMounts(mounts []string, sources runMountInfo, idMap
mountTargets = append(mountTargets, mount.Destination)
lockedTargets = lockedPaths
default:
- return nil, nil, fmt.Errorf("invalid mount type %q", kv[1])
+ return nil, nil, fmt.Errorf("invalid mount type %q", mountType)
}
}
artifacts := &runMountArtifacts{
@@ -1578,6 +1579,9 @@ func (b *Builder) getSecretMount(tokens []string, secrets map[string]define.Secr
for _, val := range tokens {
kv := strings.SplitN(val, "=", 2)
switch kv[0] {
+ case "type":
+ // This is already processed
+ continue
case "id":
id = kv[1]
case "target", "dst", "destination":
@@ -1698,6 +1702,9 @@ func (b *Builder) getSSHMount(tokens []string, count int, sshsources map[string]
return nil, nil, errInvalidSyntax
}
switch kv[0] {
+ case "type":
+ // This is already processed
+ continue
case "id":
id = kv[1]
case "target", "dst", "destination":
diff --git a/vendor/github.com/containers/buildah/run_freebsd.go b/vendor/github.com/containers/buildah/run_freebsd.go
index b8d141eec..27982560b 100644
--- a/vendor/github.com/containers/buildah/run_freebsd.go
+++ b/vendor/github.com/containers/buildah/run_freebsd.go
@@ -90,7 +90,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
gp, err := generate.New("freebsd")
if err != nil {
- return fmt.Errorf("error generating new 'freebsd' runtime spec: %w", err)
+ return fmt.Errorf("generating new 'freebsd' runtime spec: %w", err)
}
g := &gp
@@ -123,7 +123,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
}
mountPoint, err := b.Mount(b.MountLabel)
if err != nil {
- return fmt.Errorf("error mounting container %q: %w", b.ContainerID, err)
+ return fmt.Errorf("mounting container %q: %w", b.ContainerID, err)
}
defer func() {
if err := b.Unmount(); err != nil {
@@ -216,7 +216,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
runArtifacts, err := b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, options.RunMounts, runMountInfo)
if err != nil {
- return fmt.Errorf("error resolving mountpoints for container %q: %w", b.ContainerID, err)
+ return fmt.Errorf("resolving mountpoints for container %q: %w", b.ContainerID, err)
}
if runArtifacts.SSHAuthSock != "" {
sshenv := "SSH_AUTH_SOCK=" + runArtifacts.SSHAuthSock
@@ -316,7 +316,7 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
// Make sure the overlay directory is clean before running
_, err := b.store.ContainerDirectory(b.ContainerID)
if err != nil {
- return nil, fmt.Errorf("error looking up container directory for %s: %w", b.ContainerID, err)
+ return nil, fmt.Errorf("looking up container directory for %s: %w", b.ContainerID, err)
}
parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) {
@@ -542,7 +542,7 @@ func runMakeStdioPipe(uid, gid int) ([][]int, error) {
for i := range stdioPipe {
stdioPipe[i] = make([]int, 2)
if err := unix.Pipe(stdioPipe[i]); err != nil {
- return nil, fmt.Errorf("error creating pipe for container FD %d: %w", i, err)
+ return nil, fmt.Errorf("creating pipe for container FD %d: %w", i, err)
}
}
return stdioPipe, nil
diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go
index a5d51732f..d4707e39a 100644
--- a/vendor/github.com/containers/buildah/run_linux.go
+++ b/vendor/github.com/containers/buildah/run_linux.go
@@ -88,7 +88,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
gp, err := generate.New("linux")
if err != nil {
- return fmt.Errorf("error generating new 'linux' runtime spec: %w", err)
+ return fmt.Errorf("generating new 'linux' runtime spec: %w", err)
}
g := &gp
@@ -122,7 +122,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
setupSelinux(g, b.ProcessLabel, b.MountLabel)
mountPoint, err := b.Mount(b.MountLabel)
if err != nil {
- return fmt.Errorf("error mounting container %q: %w", b.ContainerID, err)
+ return fmt.Errorf("mounting container %q: %w", b.ContainerID, err)
}
defer func() {
if err := b.Unmount(); err != nil {
@@ -327,7 +327,7 @@ rootless=%d
runArtifacts, err := b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, options.RunMounts, runMountInfo)
if err != nil {
- return fmt.Errorf("error resolving mountpoints for container %q: %w", b.ContainerID, err)
+ return fmt.Errorf("resolving mountpoints for container %q: %w", b.ContainerID, err)
}
if runArtifacts.SSHAuthSock != "" {
sshenv := "SSH_AUTH_SOCK=" + runArtifacts.SSHAuthSock
@@ -506,7 +506,7 @@ func setupRootlessNetwork(pid int) (teardown func(), err error) {
b := make([]byte, 1)
for {
if err := rootlessSlirpSyncR.SetDeadline(time.Now().Add(1 * time.Second)); err != nil {
- return nil, fmt.Errorf("error setting slirp4netns pipe timeout: %w", err)
+ return nil, fmt.Errorf("setting slirp4netns pipe timeout: %w", err)
}
if _, err := rootlessSlirpSyncR.Read(b); err == nil {
break
@@ -552,7 +552,7 @@ func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, optio
netns := fmt.Sprintf("/proc/%d/ns/net", pid)
netFD, err := unix.Open(netns, unix.O_RDONLY, 0)
if err != nil {
- return nil, nil, fmt.Errorf("error opening network namespace: %w", err)
+ return nil, nil, fmt.Errorf("opening network namespace: %w", err)
}
mynetns := fmt.Sprintf("/proc/%d/fd/%d", unix.Getpid(), netFD)
@@ -589,17 +589,17 @@ func runMakeStdioPipe(uid, gid int) ([][]int, error) {
for i := range stdioPipe {
stdioPipe[i] = make([]int, 2)
if err := unix.Pipe(stdioPipe[i]); err != nil {
- return nil, fmt.Errorf("error creating pipe for container FD %d: %w", i, err)
+ return nil, fmt.Errorf("creating pipe for container FD %d: %w", i, err)
}
}
if err := unix.Fchown(stdioPipe[unix.Stdin][0], uid, gid); err != nil {
- return nil, fmt.Errorf("error setting owner of stdin pipe descriptor: %w", err)
+ return nil, fmt.Errorf("setting owner of stdin pipe descriptor: %w", err)
}
if err := unix.Fchown(stdioPipe[unix.Stdout][1], uid, gid); err != nil {
- return nil, fmt.Errorf("error setting owner of stdout pipe descriptor: %w", err)
+ return nil, fmt.Errorf("setting owner of stdout pipe descriptor: %w", err)
}
if err := unix.Fchown(stdioPipe[unix.Stderr][1], uid, gid); err != nil {
- return nil, fmt.Errorf("error setting owner of stderr pipe descriptor: %w", err)
+ return nil, fmt.Errorf("setting owner of stderr pipe descriptor: %w", err)
}
return stdioPipe, nil
}
@@ -633,20 +633,20 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti
}
if namespaceOption.Host {
if err := g.RemoveLinuxNamespace(namespaceOption.Name); err != nil {
- return false, nil, false, fmt.Errorf("error removing %q namespace for run: %w", namespaceOption.Name, err)
+ return false, nil, false, fmt.Errorf("removing %q namespace for run: %w", namespaceOption.Name, err)
}
} else if err := g.AddOrReplaceLinuxNamespace(namespaceOption.Name, namespaceOption.Path); err != nil {
if namespaceOption.Path == "" {
- return false, nil, false, fmt.Errorf("error adding new %q namespace for run: %w", namespaceOption.Name, err)
+ return false, nil, false, fmt.Errorf("adding new %q namespace for run: %w", namespaceOption.Name, err)
}
- return false, nil, false, fmt.Errorf("error adding %q namespace %q for run: %w", namespaceOption.Name, namespaceOption.Path, err)
+ return false, nil, false, fmt.Errorf("adding %q namespace %q for run: %w", namespaceOption.Name, namespaceOption.Path, err)
}
}
// If we've got mappings, we're going to have to create a user namespace.
if len(idmapOptions.UIDMap) > 0 || len(idmapOptions.GIDMap) > 0 || configureUserns {
if err := g.AddOrReplaceLinuxNamespace(string(specs.UserNamespace), ""); err != nil {
- return false, nil, false, fmt.Errorf("error adding new %q namespace for run: %w", string(specs.UserNamespace), err)
+ return false, nil, false, fmt.Errorf("adding new %q namespace for run: %w", string(specs.UserNamespace), err)
}
hostUidmap, hostGidmap, err := unshare.GetHostIDMappings("")
if err != nil {
@@ -670,17 +670,17 @@ func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOpti
}
if !specifiedNetwork {
if err := g.AddOrReplaceLinuxNamespace(string(specs.NetworkNamespace), ""); err != nil {
- return false, nil, false, fmt.Errorf("error adding new %q namespace for run: %w", string(specs.NetworkNamespace), err)
+ return false, nil, false, fmt.Errorf("adding new %q namespace for run: %w", string(specs.NetworkNamespace), err)
}
configureNetwork = (policy != define.NetworkDisabled)
}
} else {
if err := g.RemoveLinuxNamespace(string(specs.UserNamespace)); err != nil {
- return false, nil, false, fmt.Errorf("error removing %q namespace for run: %w", string(specs.UserNamespace), err)
+ return false, nil, false, fmt.Errorf("removing %q namespace for run: %w", string(specs.UserNamespace), err)
}
if !specifiedNetwork {
if err := g.RemoveLinuxNamespace(string(specs.NetworkNamespace)); err != nil {
- return false, nil, false, fmt.Errorf("error removing %q namespace for run: %w", string(specs.NetworkNamespace), err)
+ return false, nil, false, fmt.Errorf("removing %q namespace for run: %w", string(specs.NetworkNamespace), err)
}
}
}
@@ -726,7 +726,9 @@ func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions
options.ConfigureNetwork = networkPolicy
}
}
-
+ if networkPolicy == NetworkDisabled {
+ namespaceOptions.AddOrReplace(define.NamespaceOptions{{Name: string(specs.NetworkNamespace), Host: false}}...)
+ }
configureNetwork, configureNetworks, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy)
if err != nil {
return false, nil, err
@@ -796,10 +798,10 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
// Make sure the overlay directory is clean before running
containerDir, err := b.store.ContainerDirectory(b.ContainerID)
if err != nil {
- return nil, fmt.Errorf("error looking up container directory for %s: %w", b.ContainerID, err)
+ return nil, fmt.Errorf("looking up container directory for %s: %w", b.ContainerID, err)
}
if err := overlay.CleanupContent(containerDir); err != nil {
- return nil, fmt.Errorf("error cleaning up overlay content for %s: %w", b.ContainerID, err)
+ return nil, fmt.Errorf("cleaning up overlay content for %s: %w", b.ContainerID, err)
}
parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) {
@@ -966,16 +968,16 @@ func setupReadOnlyPaths(g *generate.Generator) {
func setupCapAdd(g *generate.Generator, caps ...string) error {
for _, cap := range caps {
if err := g.AddProcessCapabilityBounding(cap); err != nil {
- return fmt.Errorf("error adding %q to the bounding capability set: %w", cap, err)
+ return fmt.Errorf("adding %q to the bounding capability set: %w", cap, err)
}
if err := g.AddProcessCapabilityEffective(cap); err != nil {
- return fmt.Errorf("error adding %q to the effective capability set: %w", cap, err)
+ return fmt.Errorf("adding %q to the effective capability set: %w", cap, err)
}
if err := g.AddProcessCapabilityPermitted(cap); err != nil {
- return fmt.Errorf("error adding %q to the permitted capability set: %w", cap, err)
+ return fmt.Errorf("adding %q to the permitted capability set: %w", cap, err)
}
if err := g.AddProcessCapabilityAmbient(cap); err != nil {
- return fmt.Errorf("error adding %q to the ambient capability set: %w", cap, err)
+ return fmt.Errorf("adding %q to the ambient capability set: %w", cap, err)
}
}
return nil
@@ -984,16 +986,16 @@ func setupCapAdd(g *generate.Generator, caps ...string) error {
func setupCapDrop(g *generate.Generator, caps ...string) error {
for _, cap := range caps {
if err := g.DropProcessCapabilityBounding(cap); err != nil {
- return fmt.Errorf("error removing %q from the bounding capability set: %w", cap, err)
+ return fmt.Errorf("removing %q from the bounding capability set: %w", cap, err)
}
if err := g.DropProcessCapabilityEffective(cap); err != nil {
- return fmt.Errorf("error removing %q from the effective capability set: %w", cap, err)
+ return fmt.Errorf("removing %q from the effective capability set: %w", cap, err)
}
if err := g.DropProcessCapabilityPermitted(cap); err != nil {
- return fmt.Errorf("error removing %q from the permitted capability set: %w", cap, err)
+ return fmt.Errorf("removing %q from the permitted capability set: %w", cap, err)
}
if err := g.DropProcessCapabilityAmbient(cap); err != nil {
- return fmt.Errorf("error removing %q from the ambient capability set: %w", cap, err)
+ return fmt.Errorf("removing %q from the ambient capability set: %w", cap, err)
}
}
return nil
diff --git a/vendor/github.com/containers/buildah/seccomp.go b/vendor/github.com/containers/buildah/seccomp.go
index 668123233..0f9a2c48b 100644
--- a/vendor/github.com/containers/buildah/seccomp.go
+++ b/vendor/github.com/containers/buildah/seccomp.go
@@ -24,7 +24,7 @@ func setupSeccomp(spec *specs.Spec, seccompProfilePath string) error {
default:
seccompProfile, err := ioutil.ReadFile(seccompProfilePath)
if err != nil {
- return fmt.Errorf("opening seccomp profile (%s) failed: %w", seccompProfilePath, err)
+ return fmt.Errorf("opening seccomp profile failed: %w", err)
}
seccompConfig, err := seccomp.LoadProfile(string(seccompProfile), spec)
if err != nil {
diff --git a/vendor/github.com/containers/buildah/unmount.go b/vendor/github.com/containers/buildah/unmount.go
index ae9726ee3..66c8ce41a 100644
--- a/vendor/github.com/containers/buildah/unmount.go
+++ b/vendor/github.com/containers/buildah/unmount.go
@@ -6,12 +6,12 @@ import "fmt"
func (b *Builder) Unmount() error {
_, err := b.store.Unmount(b.ContainerID, false)
if err != nil {
- return fmt.Errorf("error unmounting build container %q: %w", b.ContainerID, err)
+ return fmt.Errorf("unmounting build container %q: %w", b.ContainerID, err)
}
b.MountPoint = ""
err = b.Save()
if err != nil {
- return fmt.Errorf("error saving updated state for build container %q: %w", b.ContainerID, err)
+ return fmt.Errorf("saving updated state for build container %q: %w", b.ContainerID, err)
}
return nil
}
diff --git a/vendor/github.com/containers/buildah/util.go b/vendor/github.com/containers/buildah/util.go
index b362dec84..ddc97cc6e 100644
--- a/vendor/github.com/containers/buildah/util.go
+++ b/vendor/github.com/containers/buildah/util.go
@@ -151,7 +151,7 @@ func ReserveSELinuxLabels(store storage.Store, id string) error {
if selinuxGetEnabled() {
containers, err := store.Containers()
if err != nil {
- return fmt.Errorf("error getting list of containers: %w", err)
+ return fmt.Errorf("getting list of containers: %w", err)
}
for _, c := range containers {
@@ -169,7 +169,7 @@ func ReserveSELinuxLabels(store storage.Store, id string) error {
}
// Prevent different containers from using same MCS label
if err := label.ReserveLabel(b.ProcessLabel); err != nil {
- return fmt.Errorf("error reserving SELinux label %q: %w", b.ProcessLabel, err)
+ return fmt.Errorf("reserving SELinux label %q: %w", b.ProcessLabel, err)
}
}
}
@@ -219,10 +219,10 @@ func extractWithTar(root, src, dest string) error {
wg.Wait()
if getErr != nil {
- return fmt.Errorf("error reading %q: %w", src, getErr)
+ return fmt.Errorf("reading %q: %w", src, getErr)
}
if putErr != nil {
- return fmt.Errorf("error copying contents of %q to %q: %w", src, dest, putErr)
+ return fmt.Errorf("copying contents of %q to %q: %w", src, dest, putErr)
}
return nil
}
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index 6a9049e68..4c67af703 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -118,18 +118,18 @@ func ExpandNames(names []string, systemContext *types.SystemContext, store stora
var name reference.Named
nameList, _, err := resolveName(n, systemContext, store)
if err != nil {
- return nil, fmt.Errorf("error parsing name %q: %w", n, err)
+ return nil, fmt.Errorf("parsing name %q: %w", n, err)
}
if len(nameList) == 0 {
named, err := reference.ParseNormalizedNamed(n)
if err != nil {
- return nil, fmt.Errorf("error parsing name %q: %w", n, err)
+ return nil, fmt.Errorf("parsing name %q: %w", n, err)
}
name = named
} else {
named, err := reference.ParseNormalizedNamed(nameList[0])
if err != nil {
- return nil, fmt.Errorf("error parsing name %q: %w", nameList[0], err)
+ return nil, fmt.Errorf("parsing name %q: %w", nameList[0], err)
}
name = named
}
@@ -169,7 +169,7 @@ func ResolveNameToReferences(
) (refs []types.ImageReference, err error) {
names, transport, err := resolveName(image, systemContext, store)
if err != nil {
- return nil, fmt.Errorf("error parsing name %q: %w", image, err)
+ return nil, fmt.Errorf("parsing name %q: %w", image, err)
}
if transport != DefaultTransport {
@@ -185,7 +185,7 @@ func ResolveNameToReferences(
refs = append(refs, ref)
}
if len(refs) == 0 {
- return nil, fmt.Errorf("error locating images with names %v", names)
+ return nil, fmt.Errorf("locating images with names %v", names)
}
return refs, nil
}
@@ -206,7 +206,7 @@ func AddImageNames(store storage.Store, firstRegistry string, systemContext *typ
for _, tag := range addNames {
if err := localImage.Tag(tag); err != nil {
- return fmt.Errorf("error tagging image %s: %w", image.ID, err)
+ return fmt.Errorf("tagging image %s: %w", image.ID, err)
}
}
diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go
index 3cc843ed3..0130532c2 100644
--- a/vendor/github.com/containers/common/libimage/image.go
+++ b/vendor/github.com/containers/common/libimage/image.go
@@ -477,8 +477,10 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma
}
report.Untagged = append(report.Untagged, i.Names()...)
- for _, name := range i.Names() {
- i.runtime.writeEvent(&Event{ID: i.ID(), Name: name, Time: time.Now(), Type: EventTypeImageUntag})
+ if i.runtime.eventChannel != nil {
+ for _, name := range i.Names() {
+ i.runtime.writeEvent(&Event{ID: i.ID(), Name: name, Time: time.Now(), Type: EventTypeImageUntag})
+ }
}
if !hasChildren {
diff --git a/vendor/github.com/containers/common/libnetwork/types/network.go b/vendor/github.com/containers/common/libnetwork/types/network.go
index de8655377..2e8948998 100644
--- a/vendor/github.com/containers/common/libnetwork/types/network.go
+++ b/vendor/github.com/containers/common/libnetwork/types/network.go
@@ -199,6 +199,7 @@ type NetAddress struct {
// PerNetworkOptions are options which should be set on a per network basis.
type PerNetworkOptions struct {
// StaticIPs for this container. Optional.
+ // swagger:type []string
StaticIPs []net.IP `json:"static_ips,omitempty"`
// Aliases contains a list of names which the dns server should resolve
// to this container. Should only be set when DNSEnabled is true on the Network.
@@ -207,6 +208,7 @@ type PerNetworkOptions struct {
// Optional.
Aliases []string `json:"aliases,omitempty"`
// StaticMac for this container. Optional.
+ // swagger:strfmt string
StaticMAC HardwareAddr `json:"static_mac,omitempty"`
// InterfaceName for this container. Required in the backend.
// Optional in the frontend. Will be filled with ethX (where X is a integer) when empty.
diff --git a/vendor/github.com/containers/common/pkg/config/config_linux.go b/vendor/github.com/containers/common/pkg/config/config_linux.go
index 4f0889f29..0f622bb78 100644
--- a/vendor/github.com/containers/common/pkg/config/config_linux.go
+++ b/vendor/github.com/containers/common/pkg/config/config_linux.go
@@ -23,7 +23,7 @@ func customConfigFile() (string, error) {
if path, found := os.LookupEnv("CONTAINERS_CONF"); found {
return path, nil
}
- if unshare.IsRootless() {
+ if unshare.GetRootlessUID() > 0 {
path, err := rootlessConfigPath()
if err != nil {
return "", err
@@ -34,7 +34,7 @@ func customConfigFile() (string, error) {
}
func ifRootlessConfigPath() (string, error) {
- if unshare.IsRootless() {
+ if unshare.GetRootlessUID() > 0 {
path, err := rootlessConfigPath()
if err != nil {
return "", err
diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go
index 3a3a558a1..eb3f5fb1e 100644
--- a/vendor/github.com/containers/common/pkg/config/default.go
+++ b/vendor/github.com/containers/common/pkg/config/default.go
@@ -180,7 +180,7 @@ func DefaultConfig() (*Config, error) {
}
defaultEngineConfig.SignaturePolicyPath = DefaultSignaturePolicyPath
- if unshare.IsRootless() {
+ if unshare.GetRootlessUID() > 0 {
configHome, err := homedir.GetConfigHome()
if err != nil {
return nil, err
@@ -289,7 +289,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
return nil, err
}
}
- storeOpts, err := types.DefaultStoreOptions(unshare.IsRootless(), unshare.GetRootlessUID())
+ storeOpts, err := types.DefaultStoreOptions(unshare.GetRootlessUID() > 0, unshare.GetRootlessUID())
if err != nil {
return nil, err
}
@@ -427,7 +427,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
}
func defaultTmpDir() (string, error) {
- if !unshare.IsRootless() {
+ if unshare.GetRootlessUID() == 0 {
return getLibpodTmpDir(), nil
}
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
index 9623546d8..0790a47f2 100644
--- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
@@ -683,7 +683,7 @@ func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types
// keys we prefer exact matches as well.
for _, key := range keys {
if val, exists := auths.AuthConfigs[key]; exists {
- return decodeDockerAuth(val)
+ return decodeDockerAuth(path, key, val)
}
}
@@ -698,7 +698,7 @@ func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types
registry = normalizeRegistry(registry)
for k, v := range auths.AuthConfigs {
if normalizeAuthFileKey(k, legacyFormat) == registry {
- return decodeDockerAuth(v)
+ return decodeDockerAuth(path, k, v)
}
}
@@ -729,9 +729,9 @@ func authKeysForKey(key string) (res []string) {
return res
}
-// decodeDockerAuth decodes the username and password, which is
-// encoded in base64.
-func decodeDockerAuth(conf dockerAuthConfig) (types.DockerAuthConfig, error) {
+// decodeDockerAuth decodes the username and password from conf,
+// which is entry key in path.
+func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuthConfig, error) {
decoded, err := base64.StdEncoding.DecodeString(conf.Auth)
if err != nil {
return types.DockerAuthConfig{}, err
@@ -740,6 +740,11 @@ func decodeDockerAuth(conf dockerAuthConfig) (types.DockerAuthConfig, error) {
parts := strings.SplitN(string(decoded), ":", 2)
if len(parts) != 2 {
// if it's invalid just skip, as docker does
+ if len(decoded) > 0 { // Docker writes "auths": { "$host": {} } entries if a credential helper is used, don’t warn about those
+ logrus.Warnf(`Error parsing the "auth" field of a credential entry %q in %q, missing semicolon`, key, path) // Don’t include the text of decoded, because that might put secrets into a log.
+ } else {
+ logrus.Debugf("Found an empty credential entry %q in %q (an unhandled credential helper marker?), moving on", key, path)
+ }
return types.DockerAuthConfig{}, nil
}
diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go
index 81c9894c5..4f2b61f52 100644
--- a/vendor/github.com/containers/storage/containers.go
+++ b/vendor/github.com/containers/storage/containers.go
@@ -3,7 +3,6 @@ package storage
import (
"errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"sync"
@@ -144,26 +143,26 @@ func copyContainer(c *Container) *Container {
}
func (c *Container) MountLabel() string {
- if label, ok := c.Flags["MountLabel"].(string); ok {
+ if label, ok := c.Flags[mountLabelFlag].(string); ok {
return label
}
return ""
}
func (c *Container) ProcessLabel() string {
- if label, ok := c.Flags["ProcessLabel"].(string); ok {
+ if label, ok := c.Flags[processLabelFlag].(string); ok {
return label
}
return ""
}
func (c *Container) MountOpts() []string {
- switch c.Flags["MountOpts"].(type) {
+ switch c.Flags[mountOptsFlag].(type) {
case []string:
- return c.Flags["MountOpts"].([]string)
+ return c.Flags[mountOptsFlag].([]string)
case []interface{}:
var mountOpts []string
- for _, v := range c.Flags["MountOpts"].([]interface{}) {
+ for _, v := range c.Flags[mountOptsFlag].([]interface{}) {
if flag, ok := v.(string); ok {
mountOpts = append(mountOpts, flag)
}
@@ -197,7 +196,7 @@ func (r *containerStore) datapath(id, key string) string {
func (r *containerStore) Load() error {
needSave := false
rpath := r.containerspath()
- data, err := ioutil.ReadFile(rpath)
+ data, err := os.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return err
}
@@ -321,10 +320,10 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
return nil, ErrDuplicateID
}
if options.MountOpts != nil {
- options.Flags["MountOpts"] = append([]string{}, options.MountOpts...)
+ options.Flags[mountOptsFlag] = append([]string{}, options.MountOpts...)
}
if options.Volatile {
- options.Flags["Volatile"] = true
+ options.Flags[volatileFlag] = true
}
names = dedupeNames(names)
for _, name := range names {
@@ -484,7 +483,7 @@ func (r *containerStore) BigData(id, key string) ([]byte, error) {
if !ok {
return nil, ErrContainerUnknown
}
- return ioutil.ReadFile(r.datapath(c.ID, key))
+ return os.ReadFile(r.datapath(c.ID, key))
}
func (r *containerStore) BigDataSize(id, key string) (int64, error) {
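Many of the containers/storage hunks that follow are a mechanical migration away from the deprecated io/ioutil package. A quick reference sketch of the replacements used (standard library only; the temp file path is just an example):

// Go 1.16+ replacements for the io/ioutil calls removed in the vendored code:
//   ioutil.ReadFile  -> os.ReadFile
//   ioutil.WriteFile -> os.WriteFile
//   ioutil.TempDir   -> os.MkdirTemp
//   ioutil.TempFile  -> os.CreateTemp
//   ioutil.ReadDir   -> os.ReadDir (returns []fs.DirEntry instead of []fs.FileInfo)
package main

import (
	"fmt"
	"os"
)

func main() {
	if err := os.WriteFile("/tmp/example.txt", []byte("hello"), 0o644); err != nil {
		fmt.Println(err)
		return
	}
	data, err := os.ReadFile("/tmp/example.txt")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(data))
}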
diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
index f0eb6d2fd..2642874be 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go
@@ -29,7 +29,6 @@ import (
"fmt"
"io"
"io/fs"
- "io/ioutil"
"os"
"os/exec"
"path"
@@ -170,7 +169,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
for _, path := range []string{"mnt", "diff"} {
p := filepath.Join(home, path)
- entries, err := ioutil.ReadDir(p)
+ entries, err := os.ReadDir(p)
if err != nil {
logger.WithError(err).WithField("dir", p).Error("error reading dir entries")
continue
@@ -730,14 +729,14 @@ func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.M
// version of aufs.
func useDirperm() bool {
enableDirpermLock.Do(func() {
- base, err := ioutil.TempDir("", "storage-aufs-base")
+ base, err := os.MkdirTemp("", "storage-aufs-base")
if err != nil {
logrus.Errorf("Checking dirperm1: %v", err)
return
}
defer os.RemoveAll(base)
- union, err := ioutil.TempDir("", "storage-aufs-union")
+ union, err := os.MkdirTemp("", "storage-aufs-union")
if err != nil {
logrus.Errorf("Checking dirperm1: %v", err)
return
diff --git a/vendor/github.com/containers/storage/drivers/aufs/dirs.go b/vendor/github.com/containers/storage/drivers/aufs/dirs.go
index d2325fc46..27e621633 100644
--- a/vendor/github.com/containers/storage/drivers/aufs/dirs.go
+++ b/vendor/github.com/containers/storage/drivers/aufs/dirs.go
@@ -1,17 +1,17 @@
+//go:build linux
// +build linux
package aufs
import (
"bufio"
- "io/ioutil"
"os"
"path"
)
// Return all the directories
func loadIds(root string) ([]string, error) {
- dirs, err := ioutil.ReadDir(root)
+ dirs, err := os.ReadDir(root)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go
deleted file mode 100644
index d030b0663..000000000
--- a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !linux
-
-package aufs
-
-import "errors"
-
-// MsRemount declared to specify a non-linux system mount.
-const MsRemount = 0
-
-func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
- return errors.New("mount is not implemented on this platform")
-}
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
index be44390da..0b5d1c510 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go
@@ -18,7 +18,6 @@ import "C"
import (
"fmt"
"io/fs"
- "io/ioutil"
"math"
"os"
"path"
@@ -524,7 +523,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil {
return err
}
- if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil {
+ if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil {
return err
}
}
@@ -643,7 +642,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
return "", fmt.Errorf("%s: not a directory", dir)
}
- if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil {
+ if quota, err := os.ReadFile(d.quotasDirID(id)); err == nil {
if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace {
if err := d.enableQuota(); err != nil {
return "", err
diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
index 905e834e3..a61d8fbd9 100644
--- a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
+++ b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go
@@ -1,4 +1,5 @@
-// +build !linux btrfs_noversion !cgo
+//go:build linux && btrfs_noversion && cgo
+// +build linux,btrfs_noversion,cgo
package btrfs
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
index c5a64a500..96c4cdacb 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go
@@ -8,7 +8,6 @@ import (
"bytes"
"errors"
"fmt"
- "io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -154,7 +153,7 @@ func readLVMConfig(root string) (directLVMConfig, error) {
var cfg directLVMConfig
p := filepath.Join(root, "setup-config.json")
- b, err := ioutil.ReadFile(p)
+ b, err := os.ReadFile(p)
if err != nil {
if os.IsNotExist(err) {
return cfg, nil
@@ -178,7 +177,7 @@ func writeLVMConfig(root string, cfg directLVMConfig) error {
if err != nil {
return fmt.Errorf("marshalling direct lvm config: %w", err)
}
- if err := ioutil.WriteFile(p, b, 0600); err != nil {
+ if err := os.WriteFile(p, b, 0600); err != nil {
return fmt.Errorf("writing direct lvm config to file: %w", err)
}
return nil
@@ -242,7 +241,7 @@ func setupDirectLVM(cfg directLVMConfig) error {
}
profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent)
- err = ioutil.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600)
+ err = os.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600)
if err != nil {
return fmt.Errorf("writing storage thinp autoextend profile: %w", err)
}
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
index 6989a4381..697a16fda 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
@@ -9,7 +9,6 @@ import (
"fmt"
"io"
"io/fs"
- "io/ioutil"
"os"
"os/exec"
"path"
@@ -331,7 +330,7 @@ func (devices *DeviceSet) removeMetadata(info *devInfo) error {
// Given json data and file path, write it to disk
func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error {
- tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp")
+ tmpFile, err := os.CreateTemp(devices.metadataDir(), ".tmp")
if err != nil {
return fmt.Errorf("devmapper: Error creating metadata file: %s", err)
}
@@ -630,7 +629,7 @@ func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) {
func (devices *DeviceSet) migrateOldMetaData() error {
// Migrate old metadata file
- jsonData, err := ioutil.ReadFile(devices.oldMetadataFile())
+ jsonData, err := os.ReadFile(devices.oldMetadataFile())
if err != nil && !os.IsNotExist(err) {
return err
}
@@ -955,7 +954,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf
func (devices *DeviceSet) loadMetadata(hash string) *devInfo {
info := &devInfo{Hash: hash, devices: devices}
- jsonData, err := ioutil.ReadFile(devices.metadataFile(info))
+ jsonData, err := os.ReadFile(devices.metadataFile(info))
if err != nil {
logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err)
return nil
@@ -1276,11 +1275,11 @@ func (devices *DeviceSet) setupBaseImage() error {
}
func setCloseOnExec(name string) {
- fileInfos, _ := ioutil.ReadDir("/proc/self/fd")
- for _, i := range fileInfos {
- link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name()))
+ fileEntries, _ := os.ReadDir("/proc/self/fd")
+ for _, e := range fileEntries {
+ link, _ := os.Readlink(filepath.Join("/proc/self/fd", e.Name()))
if link == name {
- fd, err := strconv.Atoi(i.Name())
+ fd, err := strconv.Atoi(e.Name())
if err == nil {
unix.CloseOnExec(fd)
}
@@ -1370,7 +1369,7 @@ func (devices *DeviceSet) ResizePool(size int64) error {
}
func (devices *DeviceSet) loadTransactionMetaData() error {
- jsonData, err := ioutil.ReadFile(devices.transactionMetaFile())
+ jsonData, err := os.ReadFile(devices.transactionMetaFile())
if err != nil {
// There is no active transaction. This will be the case
// during upgrade.
@@ -1451,7 +1450,7 @@ func (devices *DeviceSet) processPendingTransaction() error {
}
func (devices *DeviceSet) loadDeviceSetMetaData() error {
- jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile())
+ jsonData, err := os.ReadFile(devices.deviceSetMetaFile())
if err != nil {
// For backward compatibility return success if file does
// not exist.
@@ -2258,7 +2257,7 @@ func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
}
func (devices *DeviceSet) unmountAndDeactivateAll(dir string) {
- files, err := ioutil.ReadDir(dir)
+ files, err := os.ReadDir(dir)
if err != nil {
logrus.Warnf("devmapper: unmountAndDeactivate: %s", err)
return
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
index d2f165e26..f9f775a5d 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go
@@ -1,10 +1,10 @@
+//go:build linux && cgo
// +build linux,cgo
package devmapper
import (
"fmt"
- "io/ioutil"
"os"
"path"
"strconv"
@@ -227,7 +227,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) {
// Create an "id" file with the container/image id in it to help reconstruct this in case
// of later problems
- if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {
+ if err := os.WriteFile(idFile, []byte(id), 0600); err != nil {
d.ctr.Decrement(mp)
d.DeviceSet.UnmountDevice(id, mp)
return "", err
diff --git a/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go b/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go
index 54db6ab4a..52f0e863e 100644
--- a/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go
+++ b/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go
@@ -1,3 +1,6 @@
+//go:build linux && cgo
+// +build linux,cgo
+
package devmapper
import jsoniter "github.com/json-iterator/go"
diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go
index d4f92e682..7d96ebe54 100644
--- a/vendor/github.com/containers/storage/drivers/driver.go
+++ b/vendor/github.com/containers/storage/drivers/driver.go
@@ -48,7 +48,7 @@ type CreateOpts struct {
ignoreChownErrors bool
}
-// MountOpts contains optional arguments for LayerStope.Mount() methods.
+// MountOpts contains optional arguments for Driver.Get() methods.
type MountOpts struct {
// Mount label is the MAC Labels to assign to mount point (SELINUX)
MountLabel string
diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go
index f0e091004..5022037dc 100644
--- a/vendor/github.com/containers/storage/drivers/fsdiff.go
+++ b/vendor/github.com/containers/storage/drivers/fsdiff.go
@@ -33,10 +33,11 @@ type NaiveDiffDriver struct {
// NewNaiveDiffDriver returns a fully functional driver that wraps the
// given ProtoDriver and adds the capability of the following methods which
// it may or may not support on its own:
-// Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error)
-// Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error)
-// ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error)
-// DiffSize(id string, idMappings *idtools.IDMappings, parent, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error)
+//
+// Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error)
+// Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error)
+// ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error)
+// DiffSize(id string, idMappings *idtools.IDMappings, parent, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error)
func NewNaiveDiffDriver(driver ProtoDriver, updater LayerIDMapUpdater) Driver {
return &NaiveDiffDriver{ProtoDriver: driver, LayerIDMapUpdater: updater}
}
@@ -109,7 +110,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
// are extracted from tar's with full second precision on modified time.
// We need this hack here to make sure calls within same second receive
// correct result.
- time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now()))
+ time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second)))
return err
}), nil
}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go
index c43ab4c1e..0a0ad7dd5 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/check.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/check.go
@@ -6,7 +6,6 @@ package overlay
import (
"errors"
"fmt"
- "io/ioutil"
"os"
"path"
"path/filepath"
@@ -27,7 +26,7 @@ import (
// directory or the kernel enable CONFIG_OVERLAY_FS_REDIRECT_DIR.
// When these exist naive diff should be used.
func doesSupportNativeDiff(d, mountOpts string) error {
- td, err := ioutil.TempDir(d, "opaque-bug-check")
+ td, err := os.MkdirTemp(d, "opaque-bug-check")
if err != nil {
return err
}
@@ -82,7 +81,7 @@ func doesSupportNativeDiff(d, mountOpts string) error {
}()
// Touch file in d to force copy up of opaque directory "d" from "l2" to "l3"
- if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil {
+ if err := os.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil {
return fmt.Errorf("failed to write to merged directory: %w", err)
}
@@ -121,7 +120,7 @@ func doesSupportNativeDiff(d, mountOpts string) error {
// copying up a file from a lower layer unless/until its contents are being
// modified
func doesMetacopy(d, mountOpts string) (bool, error) {
- td, err := ioutil.TempDir(d, "metacopy-check")
+ td, err := os.MkdirTemp(d, "metacopy-check")
if err != nil {
return false, err
}
@@ -158,7 +157,7 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
}
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil {
if errors.Is(err, unix.EINVAL) {
- logrus.Info("metacopy option not supported on this kernel", mountOpts)
+ logrus.Infof("overlay: metacopy option not supported on this kernel, checked using options %q", mountOpts)
return false, nil
}
return false, fmt.Errorf("failed to mount overlay for metacopy check with %q options: %w", mountOpts, err)
@@ -186,7 +185,7 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
// doesVolatile checks if the filesystem supports the "volatile" mount option
func doesVolatile(d string) (bool, error) {
- td, err := ioutil.TempDir(d, "volatile-check")
+ td, err := os.MkdirTemp(d, "volatile-check")
if err != nil {
return false, err
}
@@ -224,7 +223,7 @@ func doesVolatile(d string) (bool, error) {
// supportsIdmappedLowerLayers checks if the kernel supports mounting overlay on top of
// a idmapped lower layer.
func supportsIdmappedLowerLayers(home string) (bool, error) {
- layerDir, err := ioutil.TempDir(home, "compat")
+ layerDir, err := os.MkdirTemp(home, "compat")
if err != nil {
return false, err
}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go
index 4b7b0db44..a7924ff3b 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go
@@ -5,7 +5,6 @@ package overlay
import (
"fmt"
- "io/ioutil"
"os"
"syscall"
"unsafe"
@@ -133,7 +132,7 @@ func createUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int,
for _, m := range idmap {
mappings = mappings + fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size)
}
- return ioutil.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0600)
+ return os.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0600)
}
if err := writeMappings("uid_map", uidMaps); err != nil {
cleanupFunc()
diff --git a/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go b/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go
index 2a1e9d0cc..bedda3507 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/jsoniter.go
@@ -1,3 +1,6 @@
+//go:build linux
+// +build linux
+
package overlay
import jsoniter "github.com/json-iterator/go"
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index 434d43521..844d2c793 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -9,7 +9,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"os"
"os/exec"
"path"
@@ -346,7 +345,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
logrus.Warnf("Network file system detected as backing store. Enforcing overlay option `force_mask=\"%o\"`. Add it to storage.conf to silence this warning", m)
}
- if err := ioutil.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0600); err != nil {
+ if err := os.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0600); err != nil {
return nil, err
}
} else {
@@ -579,11 +578,11 @@ func cachedFeatureSet(feature string, set bool) string {
}
func cachedFeatureCheck(runhome, feature string) (supported bool, text string, err error) {
- content, err := ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, true)))
+ content, err := os.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, true)))
if err == nil {
return true, string(content), nil
}
- content, err = ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, false)))
+ content, err = os.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, false)))
if err == nil {
return false, string(content), nil
}
@@ -607,7 +606,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) {
}
var contents string
- flagContent, err := ioutil.ReadFile(getMountProgramFlagFile(home))
+ flagContent, err := os.ReadFile(getMountProgramFlagFile(home))
if err == nil {
contents = strings.TrimSpace(string(flagContent))
}
@@ -620,7 +619,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) {
if err != nil && !os.IsNotExist(err) {
return false, err
}
- if err := ioutil.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0600); err != nil && !os.IsNotExist(err) {
+ if err := os.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0600); err != nil && !os.IsNotExist(err) {
return false, err
}
if needsMountProgram {
@@ -656,7 +655,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
logLevel = logrus.DebugLevel
}
- layerDir, err := ioutil.TempDir(home, "compat")
+ layerDir, err := os.MkdirTemp(home, "compat")
if err != nil {
patherr, ok := err.(*os.PathError)
if ok && patherr.Err == syscall.ENOSPC {
@@ -715,7 +714,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
logrus.Debugf("overlay: test mount with multiple lowers succeeded")
return supportsDType, nil
}
- logrus.Debugf("overlay: test mount with multiple lowers failed %v", err)
+ logrus.Debugf("overlay: test mount with multiple lowers failed: %v", err)
}
flags = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower1Dir, upperDir, workDir)
if selinux.GetEnabled() {
@@ -727,7 +726,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
logrus.StandardLogger().Logf(logLevel, "overlay: test mount with multiple lowers failed, but succeeded with a single lower")
return supportsDType, fmt.Errorf("kernel too old to provide multiple lowers feature for overlay: %w", graphdriver.ErrNotSupported)
}
- logrus.Debugf("overlay: test mount with a single lower failed %v", err)
+ logrus.Debugf("overlay: test mount with a single lower failed: %v", err)
}
logrus.StandardLogger().Logf(logLevel, "'overlay' is not supported over %s at %q", backingFs, home)
return supportsDType, fmt.Errorf("'overlay' is not supported over %s at %q: %w", backingFs, home, graphdriver.ErrIncompatibleFS)
@@ -1008,7 +1007,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
}
// Write link id to link file
- if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil {
+ if err := os.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil {
return err
}
@@ -1029,7 +1028,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
return err
}
if lower != "" {
- if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil {
+ if err := os.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil {
return err
}
}
@@ -1072,7 +1071,7 @@ func (d *Driver) getLower(parent string) (string, error) {
}
// Read Parent link fileA
- parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link"))
+ parentLink, err := os.ReadFile(path.Join(parentDir, "link"))
if err != nil {
if !os.IsNotExist(err) {
return "", err
@@ -1081,14 +1080,14 @@ func (d *Driver) getLower(parent string) (string, error) {
if err := d.recreateSymlinks(); err != nil {
return "", fmt.Errorf("recreating the links: %w", err)
}
- parentLink, err = ioutil.ReadFile(path.Join(parentDir, "link"))
+ parentLink, err = os.ReadFile(path.Join(parentDir, "link"))
if err != nil {
return "", err
}
}
lowers := []string{path.Join(linkDir, string(parentLink))}
- parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile))
+ parentLower, err := os.ReadFile(path.Join(parentDir, lowerFile))
if err == nil {
parentLowers := strings.Split(string(parentLower), ":")
lowers = append(lowers, parentLowers...)
@@ -1117,7 +1116,7 @@ func (d *Driver) dir2(id string) (string, bool) {
func (d *Driver) getLowerDirs(id string) ([]string, error) {
var lowersArray []string
- lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile))
+ lowers, err := os.ReadFile(path.Join(d.dir(id), lowerFile))
if err == nil {
for _, s := range strings.Split(string(lowers), ":") {
lower := d.dir(s)
@@ -1186,7 +1185,7 @@ func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMa
// Remove cleans the directories that are created for this id.
func (d *Driver) Remove(id string) error {
dir := d.dir(id)
- lid, err := ioutil.ReadFile(path.Join(dir, "link"))
+ lid, err := os.ReadFile(path.Join(dir, "link"))
if err == nil {
if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil {
logrus.Debugf("Failed to remove link: %v", err)
@@ -1209,7 +1208,7 @@ func (d *Driver) recreateSymlinks() error {
const maxIterations = 10
// List all the directories under the home directory
- dirs, err := ioutil.ReadDir(d.home)
+ dirs, err := os.ReadDir(d.home)
if err != nil {
return fmt.Errorf("reading driver home directory %q: %w", d.home, err)
}
@@ -1228,11 +1227,11 @@ func (d *Driver) recreateSymlinks() error {
// the layer's "link" file that points to the layer's "diff" directory.
for _, dir := range dirs {
// Skip over the linkDir and anything that is not a directory
- if dir.Name() == linkDir || !dir.Mode().IsDir() {
+ if dir.Name() == linkDir || !dir.IsDir() {
continue
}
// Read the "link" file under each layer to get the name of the symlink
- data, err := ioutil.ReadFile(path.Join(d.dir(dir.Name()), "link"))
+ data, err := os.ReadFile(path.Join(d.dir(dir.Name()), "link"))
if err != nil {
errs = multierror.Append(errs, fmt.Errorf("reading name of symlink for %q: %w", dir.Name(), err))
continue
@@ -1257,7 +1256,7 @@ func (d *Driver) recreateSymlinks() error {
linkDirFullPath := filepath.Join(d.home, "l")
// Now check if we somehow lost a "link" file, by making sure
// that each symlink we have corresponds to one.
- links, err := ioutil.ReadDir(linkDirFullPath)
+ links, err := os.ReadDir(linkDirFullPath)
if err != nil {
errs = multierror.Append(errs, err)
continue
@@ -1287,11 +1286,11 @@ func (d *Driver) recreateSymlinks() error {
// it has the basename of our symlink in it.
targetID := targetComponents[1]
linkFile := filepath.Join(d.dir(targetID), "link")
- data, err := ioutil.ReadFile(linkFile)
+ data, err := os.ReadFile(linkFile)
if err != nil || string(data) != link.Name() {
// NOTE: If two or more links point to the same target, we will update linkFile
// with every value of link.Name(), and set madeProgress = true every time.
- if err := ioutil.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil {
+ if err := os.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil {
errs = multierror.Append(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err))
continue
}
@@ -1311,7 +1310,7 @@ func (d *Driver) recreateSymlinks() error {
}
// Get creates and mounts the required file system for the given id and returns the mount path.
-func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
+func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
return d.get(id, false, options)
}
@@ -1346,7 +1345,12 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if !d.usingMetacopy {
if hasMetacopyOption(optsList) {
if d.options.mountProgram == "" {
- logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel")
+ release := ""
+ var uts unix.Utsname
+ if err := unix.Uname(&uts); err == nil {
+ release = " " + string(uts.Release[:]) + " " + string(uts.Version[:])
+ }
+ logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel"+release)
} else {
logrus.Debugf("Ignoring global metacopy option, the mount program doesn't support it")
}
@@ -1361,7 +1365,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
}
- lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile))
+ lowers, err := os.ReadFile(path.Join(dir, lowerFile))
if err != nil && !os.IsNotExist(err) {
return "", err
}
@@ -1377,7 +1381,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
// Check if $link/../diff{1-*} exist. If they do, add them, in order, as the front of the lowers
// lists that we're building. "diff" itself is the upper, so it won't be in the lists.
- link, err := ioutil.ReadFile(path.Join(dir, "link"))
+ link, err := os.ReadFile(path.Join(dir, "link"))
if err != nil {
if !os.IsNotExist(err) {
return "", err
@@ -1386,7 +1390,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if err := d.recreateSymlinks(); err != nil {
return "", fmt.Errorf("recreating the links: %w", err)
}
- link, err = ioutil.ReadFile(path.Join(dir, "link"))
+ link, err = os.ReadFile(path.Join(dir, "link"))
if err != nil {
return "", err
}
@@ -1652,7 +1656,7 @@ func (d *Driver) Put(id string) error {
if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil
}
- if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) {
+ if _, err := os.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) {
return err
}
@@ -1661,7 +1665,7 @@ func (d *Driver) Put(id string) error {
mappedRoot := filepath.Join(d.home, id, "mapped")
// It should not happen, but cleanup any mapped mount if it was leaked.
if _, err := os.Stat(mappedRoot); err == nil {
- mounts, err := ioutil.ReadDir(mappedRoot)
+ mounts, err := os.ReadDir(mappedRoot)
if err == nil {
// Go through all of the mapped mounts.
for _, m := range mounts {
@@ -1809,7 +1813,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
if err != nil && !os.IsExist(err) {
return graphdriver.DriverWithDifferOutput{}, err
}
- applyDir, err = ioutil.TempDir(d.getStagingDir(), "")
+ applyDir, err = os.MkdirTemp(d.getStagingDir(), "")
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
@@ -2170,7 +2174,7 @@ func (al *additionalLayer) CreateAs(id, parent string) error {
}
// tell the additional layer store that we use this layer.
// mark this layer as "additional layer"
- if err := ioutil.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0644); err != nil {
+ if err := os.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0644); err != nil {
return err
}
notifyUseAdditionalLayer(al.path)
@@ -2178,7 +2182,7 @@ func (al *additionalLayer) CreateAs(id, parent string) error {
}
func (d *Driver) getAdditionalLayerPathByID(id string) (string, error) {
- al, err := ioutil.ReadFile(path.Join(d.dir(id), "additionallayer"))
+ al, err := os.ReadFile(path.Join(d.dir(id), "additionallayer"))
if err != nil {
return "", err
}
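One behavioral detail hiding in the overlay hunks above: ioutil.ReadDir returned []fs.FileInfo, while os.ReadDir returns []fs.DirEntry, which is why recreateSymlinks switches from dir.Mode().IsDir() to dir.IsDir(). A small sketch of the DirEntry API, assuming a plain directory listing rather than the driver's own layout:

    // os.ReadDir yields fs.DirEntry values; type checks no longer go through
    // Mode(), and a full FileInfo is only fetched when actually needed.
    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        entries, err := os.ReadDir(".") // was ioutil.ReadDir
        if err != nil {
            panic(err)
        }
        for _, e := range entries {
            if !e.IsDir() { // was e.Mode().IsDir() on fs.FileInfo
                continue
            }
            info, err := e.Info() // stat lazily, only when details matter
            if err != nil {
                continue
            }
            fmt.Println(e.Name(), info.ModTime().Format("2006-01-02"))
        }
    }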
diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
index 0e6a47fc9..ed4c7eaa5 100644
--- a/vendor/github.com/containers/storage/drivers/quota/projectquota.go
+++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go
@@ -52,7 +52,6 @@ struct fsxattr {
import "C"
import (
"fmt"
- "io/ioutil"
"math"
"os"
"path"
@@ -123,11 +122,9 @@ func generateUniqueProjectID(path string) (uint32, error) {
// This is a way to prevent xfs_quota management from conflicting with
// containers/storage.
-//
// Then try to create a test directory with the next project id and set a quota
// on it. If that works, continue to scan existing containers to map allocated
// project ids.
-//
func NewControl(basePath string) (*Control, error) {
//
// Get project id of parent dir as minimal id to be used by driver
@@ -336,7 +333,7 @@ func setProjectID(targetPath string, projectID uint32) error {
// findNextProjectID - find the next project id to be used for containers
// by scanning driver home directory to find used project ids
func (q *Control) findNextProjectID(home string) error {
- files, err := ioutil.ReadDir(home)
+ files, err := os.ReadDir(home)
if err != nil {
return fmt.Errorf("read directory failed : %s", home)
}
diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go
index 7baf6c075..7def16cd3 100644
--- a/vendor/github.com/containers/storage/drivers/windows/windows.go
+++ b/vendor/github.com/containers/storage/drivers/windows/windows.go
@@ -10,7 +10,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"os"
"path"
"path/filepath"
@@ -24,7 +23,7 @@ import (
"github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/backuptar"
"github.com/Microsoft/hcsshim"
- "github.com/containers/storage/drivers"
+ graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
@@ -475,7 +474,7 @@ func (d *Driver) Put(id string) error {
// We use this opportunity to cleanup any -removing folders which may be
// still left if the daemon was killed while it was removing a layer.
func (d *Driver) Cleanup() error {
- items, err := ioutil.ReadDir(d.info.HomeDir)
+ items, err := os.ReadDir(d.info.HomeDir)
if err != nil {
if os.IsNotExist(err) {
return nil
@@ -870,7 +869,7 @@ func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ..
// resolveID computes the layerID information based on the given id.
func (d *Driver) resolveID(id string) (string, error) {
- content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID"))
+ content, err := os.ReadFile(filepath.Join(d.dir(id), "layerID"))
if os.IsNotExist(err) {
return id, nil
} else if err != nil {
@@ -881,13 +880,13 @@ func (d *Driver) resolveID(id string) (string, error) {
// setID stores the layerId in disk.
func (d *Driver) setID(id, altID string) error {
- return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
+ return os.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
}
// getLayerChain returns the layer chain information.
func (d *Driver) getLayerChain(id string) ([]string, error) {
jPath := filepath.Join(d.dir(id), "layerchain.json")
- content, err := ioutil.ReadFile(jPath)
+ content, err := os.ReadFile(jPath)
if os.IsNotExist(err) {
return nil, nil
} else if err != nil {
@@ -911,7 +910,7 @@ func (d *Driver) setLayerChain(id string, chain []string) error {
}
jPath := filepath.Join(d.dir(id), "layerchain.json")
- err = ioutil.WriteFile(jPath, content, 0600)
+ err = os.WriteFile(jPath, content, 0600)
if err != nil {
return fmt.Errorf("unable to write layerchain file - %s", err)
}
diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
index eedaeed9d..0d4001783 100644
--- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go
+++ b/vendor/github.com/containers/storage/drivers/zfs/zfs.go
@@ -18,7 +18,7 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
- "github.com/mistifyio/go-zfs"
+ zfs "github.com/mistifyio/go-zfs/v3"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go
index ad3389722..c76a6c9f9 100644
--- a/vendor/github.com/containers/storage/images.go
+++ b/vendor/github.com/containers/storage/images.go
@@ -3,7 +3,6 @@ package storage
import (
"errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -261,7 +260,7 @@ func (i *Image) recomputeDigests() error {
func (r *imageStore) Load() error {
shouldSave := false
rpath := r.imagespath()
- data, err := ioutil.ReadFile(rpath)
+ data, err := os.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return err
}
@@ -636,7 +635,7 @@ func (r *imageStore) BigData(id, key string) ([]byte, error) {
if !ok {
return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
- return ioutil.ReadFile(r.datapath(image.ID, key))
+ return os.ReadFile(r.datapath(image.ID, key))
}
func (r *imageStore) BigDataSize(id, key string) (int64, error) {
diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go
index 0863648ae..18f3630e9 100644
--- a/vendor/github.com/containers/storage/layers.go
+++ b/vendor/github.com/containers/storage/layers.go
@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"os"
"path"
"path/filepath"
@@ -352,7 +351,7 @@ func (r *layerStore) Load() error {
} else {
r.layerspathModified = info.ModTime()
}
- data, err := ioutil.ReadFile(rpath)
+ data, err := os.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return err
}
@@ -443,7 +442,7 @@ func (r *layerStore) LoadLocked() error {
func (r *layerStore) loadMounts() error {
mounts := make(map[string]*Layer)
mpath := r.mountspath()
- data, err := ioutil.ReadFile(mpath)
+ data, err := os.ReadFile(mpath)
if err != nil && !os.IsNotExist(err) {
return err
}
@@ -754,7 +753,7 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
templateCompressionType = templateLayer.CompressionType
templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...)
- templateTSdata, tserr = ioutil.ReadFile(r.tspath(templateLayer.ID))
+ templateTSdata, tserr = os.ReadFile(r.tspath(templateLayer.ID))
if tserr != nil && !os.IsNotExist(tserr) {
return nil, -1, tserr
}
@@ -1389,6 +1388,9 @@ func (r *layerStore) Wipe() error {
for id := range r.byid {
ids = append(ids, id)
}
+ sort.Slice(ids, func(i, j int) bool {
+ return r.byid[ids[i]].Created.After(r.byid[ids[j]].Created)
+ })
for _, id := range ids {
if err := r.Delete(id); err != nil {
return err
@@ -1668,7 +1670,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
if compressedDigester != nil {
compressedWriter = compressedDigester.Hash()
} else {
- compressedWriter = ioutil.Discard
+ compressedWriter = io.Discard
}
compressedCounter := ioutils.NewWriteCounter(compressedWriter)
defragmented = io.TeeReader(defragmented, compressedCounter)
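The one non-mechanical change in layers.go is the new sort in Wipe(): layer IDs are ordered by Created, newest first, before deletion, presumably so child layers are removed before the parents they build on. A hypothetical sketch of that ordering (the layer struct below is a stand-in, not the store's own type):

    // Delete newest-first: layers created later are assumed to depend on
    // layers created earlier, so children go before parents.
    package main

    import (
        "fmt"
        "sort"
        "time"
    )

    type layer struct {
        ID      string
        Created time.Time
    }

    func main() {
        now := time.Now()
        layers := []layer{
            {ID: "base", Created: now.Add(-2 * time.Hour)},
            {ID: "middle", Created: now.Add(-time.Hour)},
            {ID: "top", Created: now},
        }
        sort.Slice(layers, func(i, j int) bool {
            return layers[i].Created.After(layers[j].Created) // newest first
        })
        for _, l := range layers {
            fmt.Println("delete", l.ID) // top, middle, base
        }
    }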
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go
index fc9ca3602..1d7bbfa98 100644
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go
@@ -9,7 +9,6 @@ import (
"fmt"
"io"
"io/fs"
- "io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -483,7 +482,7 @@ func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *
}
// canonicalTarName provides a platform-independent and consistent posix-style
-//path for files and directories to be archived regardless of the platform.
+// path for files and directories to be archived regardless of the platform.
func canonicalTarName(name string, isDir bool) (string, error) {
name, err := CanonicalTarNameForPath(name)
if err != nil {
@@ -1106,7 +1105,9 @@ loop:
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
-// identity (uncompressed), gzip, bzip2, xz.
+//
+// identity (uncompressed), gzip, bzip2, xz.
+//
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
return untarHandler(tarArchive, dest, options, true)
@@ -1347,7 +1348,7 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
- f, err := ioutil.TempFile(dir, "")
+ f, err := os.CreateTemp(dir, "")
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go
index c7bb25d0f..6cd9e35e9 100644
--- a/vendor/github.com/containers/storage/pkg/archive/changes.go
+++ b/vendor/github.com/containers/storage/pkg/archive/changes.go
@@ -5,7 +5,6 @@ import (
"bytes"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"reflect"
@@ -403,7 +402,7 @@ func ChangesDirs(newDir string, newMappings *idtools.IDMappings, oldDir string,
oldRoot, newRoot *FileInfo
)
if oldDir == "" {
- emptyDir, err := ioutil.TempDir("", "empty")
+ emptyDir, err := os.MkdirTemp("", "empty")
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go
index 6298a674d..2c714e8da 100644
--- a/vendor/github.com/containers/storage/pkg/archive/copy.go
+++ b/vendor/github.com/containers/storage/pkg/archive/copy.go
@@ -4,7 +4,6 @@ import (
"archive/tar"
"errors"
"io"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -255,7 +254,7 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir
// The destination exists as a directory. No alteration
// to srcContent is needed as its contents can be
// simply extracted to the destination directory.
- return dstInfo.Path, ioutil.NopCloser(srcContent), nil
+ return dstInfo.Path, io.NopCloser(srcContent), nil
case dstInfo.Exists && srcInfo.IsDir:
// The destination exists as some type of file and the source
// content is a directory. This is an error condition since
diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go
index 59a3207fd..7e835d44b 100644
--- a/vendor/github.com/containers/storage/pkg/archive/diff.go
+++ b/vendor/github.com/containers/storage/pkg/archive/diff.go
@@ -5,7 +5,6 @@ import (
"fmt"
"io"
"io/fs"
- "io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -102,7 +101,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
- if aufsTempdir, err = ioutil.TempDir("", "storageplnk"); err != nil {
+ if aufsTempdir, err = os.MkdirTemp("", "storageplnk"); err != nil {
return 0, err
}
defer os.RemoveAll(aufsTempdir)
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
index d66c98b30..b5d8961e5 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
@@ -4,7 +4,6 @@ import (
stdtar "archive/tar"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"sync"
@@ -31,7 +30,8 @@ func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
-// identity (uncompressed), gzip, bzip2, xz.
+//
+// identity (uncompressed), gzip, bzip2, xz.
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
return untarHandler(tarArchive, dest, options, true, dest)
}
@@ -82,7 +82,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
}
}
- r := ioutil.NopCloser(tarArchive)
+ r := io.NopCloser(tarArchive)
if decompress {
decompressedArchive, err := archive.DecompressStream(tarArchive)
if err != nil {
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
index 2d64c2800..8cc0f33b3 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go
@@ -9,7 +9,6 @@ import (
"flag"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -111,7 +110,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
// when `xz -d -c -q | storage-untar ...` failed on storage-untar side,
// we need to exhaust `xz`'s output, otherwise the `xz` side will be
// pending on write pipe forever
- io.Copy(ioutil.Discard, decompressedArchive)
+ io.Copy(io.Discard, decompressedArchive)
return fmt.Errorf("processing tar file(%s): %w", output, err)
}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
index 255882174..09ef6d5de 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go
@@ -2,7 +2,6 @@ package chrootarchive
import (
"fmt"
- "io/ioutil"
"net"
"os"
"os/user"
@@ -51,7 +50,7 @@ func chroot(path string) (err error) {
}
// setup oldRoot for pivot_root
- pivotDir, err := ioutil.TempDir(path, ".pivot_root")
+ pivotDir, err := os.MkdirTemp(path, ".pivot_root")
if err != nil {
return fmt.Errorf("setting up pivot dir: %w", err)
}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go
index d6326c808..52c677bc7 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go
@@ -3,7 +3,6 @@ package chrootarchive
import (
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
@@ -26,7 +25,7 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions
layer = decompressed
}
- tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-storage-extract")
+ tmpDir, err := os.MkdirTemp(os.Getenv("temp"), "temp-storage-extract")
if err != nil {
return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. %s", dest, err)
}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
index 511c61761..90f453913 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go
@@ -8,7 +8,6 @@ import (
"flag"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -56,7 +55,7 @@ func applyLayer() {
options.InUserNS = true
}
- if tmpDir, err = ioutil.TempDir("/", "temp-storage-extract"); err != nil {
+ if tmpDir, err = os.MkdirTemp("/", "temp-storage-extract"); err != nil {
fatal(err)
}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go
index 8f8e88bfb..8bfff5d65 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go
@@ -3,7 +3,6 @@ package chrootarchive
import (
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
@@ -30,7 +29,7 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions
layer = decompressed
}
- tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-storage-extract")
+ tmpDir, err := os.MkdirTemp(os.Getenv("temp"), "temp-storage-extract")
if err != nil {
return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. %s", dest, err)
}
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
index 45caec972..274a946e2 100644
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows && !darwin
// +build !windows,!darwin
package chrootarchive
@@ -5,7 +6,6 @@ package chrootarchive
import (
"fmt"
"io"
- "io/ioutil"
"os"
"github.com/containers/storage/pkg/reexec"
@@ -25,5 +25,5 @@ func fatal(err error) {
// flush consumes all the bytes from the reader discarding
// any errors
func flush(r io.Reader) (bytes int64, err error) {
- return io.Copy(ioutil.Discard, r)
+ return io.Copy(io.Discard, r)
}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
index c88091393..727956799 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"os"
"sort"
"strconv"
@@ -128,7 +127,7 @@ func (c *layersCache) load() error {
}
defer manifestReader.Close()
- manifest, err := ioutil.ReadAll(manifestReader)
+ manifest, err := io.ReadAll(manifestReader)
if err != nil {
return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err)
}
@@ -334,7 +333,7 @@ func writeCache(manifest []byte, id string, dest setBigData) (*metadata, error)
}()
defer pipeReader.Close()
- counter := ioutils.NewWriteCounter(ioutil.Discard)
+ counter := ioutils.NewWriteCounter(io.Discard)
r := io.TeeReader(pipeReader, counter)
diff --git a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
index aeb7cfd4f..362c168d0 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
@@ -8,7 +8,6 @@ import (
"bufio"
"encoding/base64"
"io"
- "io/ioutil"
"github.com/containers/storage/pkg/chunked/internal"
"github.com/containers/storage/pkg/ioutils"
@@ -21,9 +20,7 @@ const holesThreshold = int64(1 << 10)
type holesFinder struct {
reader *bufio.Reader
- fileOff int64
zeros int64
- from int64
threshold int64
state int
@@ -432,7 +429,7 @@ func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level
go func() {
ch <- writeZstdChunkedStream(out, metadata, r, level)
- io.Copy(ioutil.Discard, r)
+ io.Copy(io.Discard, r)
r.Close()
close(ch)
}()
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage.go b/vendor/github.com/containers/storage/pkg/chunked/storage.go
index 9212cbbcf..f0bd36273 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage.go
@@ -1,7 +1,6 @@
package chunked
import (
- "fmt"
"io"
)
@@ -22,5 +21,5 @@ type ErrBadRequest struct {
}
func (e ErrBadRequest) Error() string {
- return fmt.Sprintf("bad request")
+ return "bad request"
}
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
index 7278f2d88..83d6e2f88 100644
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
@@ -8,7 +8,6 @@ import (
"fmt"
"hash"
"io"
- "io/ioutil"
"os"
"path/filepath"
"reflect"
@@ -657,7 +656,7 @@ func (c *chunkedDiffer) prepareCompressedStreamToFile(partCompression compressed
// Only the missing chunk in the requested part refers to a hole.
// The received data must be discarded.
limitReader := io.LimitReader(from, mf.CompressedSize)
- _, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer)
+ _, err := io.CopyBuffer(io.Discard, limitReader, c.copyBuffer)
return fileTypeHole, err
case partCompression == fileTypeZstdChunked:
c.rawReader = io.LimitReader(from, mf.CompressedSize)
@@ -856,7 +855,7 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan
for _, mf := range missingPart.Chunks {
if mf.Gap > 0 {
limitReader := io.LimitReader(part, mf.Gap)
- _, err := io.CopyBuffer(ioutil.Discard, limitReader, c.copyBuffer)
+ _, err := io.CopyBuffer(io.Discard, limitReader, c.copyBuffer)
if err != nil {
Err = err
goto exit
@@ -906,7 +905,7 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan
goto exit
}
if c.rawReader != nil {
- if _, err := io.CopyBuffer(ioutil.Discard, c.rawReader, c.copyBuffer); err != nil {
+ if _, err := io.CopyBuffer(io.Discard, c.rawReader, c.copyBuffer); err != nil {
Err = err
goto exit
}
diff --git a/vendor/github.com/containers/storage/pkg/directory/directory.go b/vendor/github.com/containers/storage/pkg/directory/directory.go
index b0ce706e5..829fe59f3 100644
--- a/vendor/github.com/containers/storage/pkg/directory/directory.go
+++ b/vendor/github.com/containers/storage/pkg/directory/directory.go
@@ -1,7 +1,6 @@
package directory
import (
- "io/ioutil"
"os"
"path/filepath"
)
@@ -15,7 +14,7 @@ type DiskUsage struct {
// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path
func MoveToSubdir(oldpath, subdir string) error {
- infos, err := ioutil.ReadDir(oldpath)
+ infos, err := os.ReadDir(oldpath)
if err != nil {
return err
}
diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go
index 92056c1d5..92e0263d8 100644
--- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go
+++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go
@@ -1,10 +1,10 @@
+//go:build linux || freebsd
// +build linux freebsd
package fileutils
import (
"fmt"
- "io/ioutil"
"os"
"github.com/sirupsen/logrus"
@@ -13,7 +13,7 @@ import (
// GetTotalUsedFds Returns the number of used File Descriptors by
// reading it via /proc filesystem.
func GetTotalUsedFds() int {
- if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
+ if fds, err := os.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
logrus.Errorf("%v", err)
} else {
return len(fds)
diff --git a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go
index e6094b55b..9854cac1c 100644
--- a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go
+++ b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go
@@ -1,10 +1,10 @@
+//go:build linux
// +build linux
package fsutils
import (
"fmt"
- "io/ioutil"
"os"
"unsafe"
@@ -12,14 +12,14 @@ import (
)
func locateDummyIfEmpty(path string) (string, error) {
- children, err := ioutil.ReadDir(path)
+ children, err := os.ReadDir(path)
if err != nil {
return "", err
}
if len(children) != 0 {
return "", nil
}
- dummyFile, err := ioutil.TempFile(path, "fsutils-dummy")
+ dummyFile, err := os.CreateTemp(path, "fsutils-dummy")
if err != nil {
return "", err
}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
index a7f4eaf13..a57609067 100644
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go
@@ -3,7 +3,6 @@ package idtools
import (
"bufio"
"fmt"
- "io/ioutil"
"os"
"os/user"
"runtime"
@@ -219,7 +218,7 @@ func getOverflowUID() int {
overflowUIDOnce.Do(func() {
// 65534 is the value on older kernels where /proc/sys/kernel/overflowuid is not present
overflowUID = 65534
- if content, err := ioutil.ReadFile("/proc/sys/kernel/overflowuid"); err == nil {
+ if content, err := os.ReadFile("/proc/sys/kernel/overflowuid"); err == nil {
if tmp, err := strconv.Atoi(string(content)); err == nil {
overflowUID = tmp
}
@@ -233,7 +232,7 @@ func getOverflowGID() int {
overflowGIDOnce.Do(func() {
// 65534 is the value on older kernels where /proc/sys/kernel/overflowgid is not present
overflowGID = 65534
- if content, err := ioutil.ReadFile("/proc/sys/kernel/overflowgid"); err == nil {
+ if content, err := os.ReadFile("/proc/sys/kernel/overflowgid"); err == nil {
if tmp, err := strconv.Atoi(string(content)); err == nil {
overflowGID = tmp
}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
index cd12470f9..a74893e81 100644
--- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
+++ b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go
@@ -2,7 +2,6 @@ package ioutils
import (
"io"
- "io/ioutil"
"os"
"path/filepath"
)
@@ -28,7 +27,7 @@ func SetDefaultOptions(opts AtomicFileWriterOptions) {
// temporary file and closing it atomically changes the temporary file to
// destination path. Writing and closing concurrently is not allowed.
func NewAtomicFileWriterWithOpts(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (io.WriteCloser, error) {
- f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
+ f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
if err != nil {
return nil, err
}
@@ -124,7 +123,7 @@ type AtomicWriteSet struct {
// commit. If no temporary directory is given the system
// default is used.
func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) {
- td, err := ioutil.TempDir(tmpDir, "write-set-")
+ td, err := os.MkdirTemp(tmpDir, "write-set-")
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
index 1539ad21b..9d5af610e 100644
--- a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
+++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go
@@ -1,10 +1,11 @@
+//go:build !windows
// +build !windows
package ioutils
-import "io/ioutil"
+import "os"
-// TempDir on Unix systems is equivalent to ioutil.TempDir.
+// TempDir on Unix systems is equivalent to os.MkdirTemp.
func TempDir(dir, prefix string) (string, error) {
- return ioutil.TempDir(dir, prefix)
+ return os.MkdirTemp(dir, prefix)
}
diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
index c719c120b..2c2242d69 100644
--- a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
+++ b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go
@@ -1,16 +1,17 @@
+//go:build windows
// +build windows
package ioutils
import (
- "io/ioutil"
+ "os"
"github.com/containers/storage/pkg/longpath"
)
-// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
+// TempDir is the equivalent of os.MkdirTemp, except that the result is in Windows longpath format.
func TempDir(dir, prefix string) (string, error) {
- tempDir, err := ioutil.TempDir(dir, prefix)
+ tempDir, err := os.MkdirTemp(dir, prefix)
if err != nil {
return "", err
}
diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
index b04c1ad05..3c242016d 100644
--- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
+++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go
@@ -65,19 +65,19 @@ func newLastWriterID() []byte {
}
// openLock opens the file at path and returns the corresponding file
-// descriptor. Note that the path is opened read-only when ro is set. If ro
-// is unset, openLock will open the path read-write and create the file if
-// necessary.
+// descriptor. The path is opened either read-only or read-write,
+// depending on the value of ro argument.
+//
+// openLock will create the file and its parent directories,
+// if necessary.
func openLock(path string, ro bool) (fd int, err error) {
+ flags := unix.O_CLOEXEC | os.O_CREATE
if ro {
- fd, err = unix.Open(path, os.O_RDONLY|unix.O_CLOEXEC|os.O_CREATE, 0)
+ flags |= os.O_RDONLY
} else {
- fd, err = unix.Open(path,
- os.O_RDWR|unix.O_CLOEXEC|os.O_CREATE,
- unix.S_IRUSR|unix.S_IWUSR|unix.S_IRGRP|unix.S_IROTH,
- )
+ flags |= os.O_RDWR
}
-
+ fd, err = unix.Open(path, flags, 0o644)
if err == nil {
return
}
@@ -91,7 +91,7 @@ func openLock(path string, ro bool) (fd int, err error) {
return openLock(path, ro)
}
- return
+ return fd, &os.PathError{Op: "open", Path: path, Err: err}
}
// createLockerForPath returns a Locker object, possibly (depending on the platform)
@@ -158,7 +158,7 @@ func (l *lockfile) lock(lType int16, recursive bool) {
// If we're the first reference on the lock, we need to open the file again.
fd, err := openLock(l.file, l.ro)
if err != nil {
- panic(fmt.Sprintf("error opening %q: %v", l.file, err))
+ panic(err)
}
l.fd = uintptr(fd)
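The lockfile_unix.go hunk folds the two unix.Open calls into a single flag computation, creates the lock file with mode 0o644 in both the read-only and read-write cases, and wraps failures in *os.PathError so the panic in lock() names the failing file. A sketch of the same pattern using only the portable os API instead of x/sys/unix (illustrative, not the patch's code):

    // One flag computation for both open modes; os.OpenFile already returns
    // a *os.PathError on failure, so callers can panic(err) and still see
    // the operation, path, and errno.
    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func openLock(path string, ro bool) (*os.File, error) {
        flags := os.O_CREATE
        if ro {
            flags |= os.O_RDONLY
        } else {
            flags |= os.O_RDWR
        }
        return os.OpenFile(path, flags, 0o644)
    }

    func main() {
        f, err := openLock(filepath.Join(os.TempDir(), "example.lock"), false)
        if err != nil {
            panic(err)
        }
        defer f.Close()
        fmt.Println("holding", f.Name())
    }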
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
index 6a7752437..edc588a63 100644
--- a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
+++ b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
@@ -10,13 +10,14 @@ import (
// LUtimesNano is used to change access and modification time of the specified path.
// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm.
func LUtimesNano(path string, ts []syscall.Timespec) error {
+ atFdCwd := unix.AT_FDCWD
+
var _path *byte
_path, err := unix.BytePtrFromString(path)
if err != nil {
return err
}
-
- if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS {
+ if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS {
return err
}
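On FreeBSD, LUtimesNano now calls utimensat(2) with AT_FDCWD and AT_SYMLINK_NOFOLLOW instead of the lutimes syscall, so the timestamps are applied to the symlink itself rather than its target and nanosecond precision is kept. A small sketch of the same effect via the x/sys/unix wrapper (illustrative only; the patch issues the raw Syscall6 shown above):

    // UtimesNanoAt wraps utimensat(2); AT_SYMLINK_NOFOLLOW makes it touch
    // the link, not what the link points to.
    package main

    import (
        "time"

        "golang.org/x/sys/unix"
    )

    func main() {
        link := "/tmp/example-link"
        if err := unix.Symlink("/etc/hostname", link); err != nil {
            panic(err)
        }
        defer unix.Unlink(link)

        ts := []unix.Timespec{
            unix.NsecToTimespec(time.Now().UnixNano()), // atime
            unix.NsecToTimespec(time.Now().UnixNano()), // mtime
        }
        if err := unix.UtimesNanoAt(unix.AT_FDCWD, link, ts, unix.AT_SYMLINK_NOFOLLOW); err != nil {
            panic(err)
        }
    }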
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
index b7ad1e19e..3fc36201c 100644
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
@@ -9,7 +9,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"os"
"os/exec"
"os/signal"
@@ -390,7 +389,7 @@ const (
// hasFullUsersMappings checks whether the current user namespace has all the IDs mapped.
func hasFullUsersMappings() (bool, error) {
- content, err := ioutil.ReadFile("/proc/self/uid_map")
+ content, err := os.ReadFile("/proc/self/uid_map")
if err != nil {
return false, err
}
diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf
index 1814b4727..1294f6a9a 100644
--- a/vendor/github.com/containers/storage/storage.conf
+++ b/vendor/github.com/containers/storage/storage.conf
@@ -65,7 +65,7 @@ pull_options = {enable_partial_images = "false", use_hard_links = "false", ostre
# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
# a container, to the UIDs/GIDs as they should appear outside of the container,
# and the length of the range of UIDs/GIDs. Additional mapped sets can be
-# listed and will be needed by libraries, but there are limits to the number of
+# listed and will be heeded by libraries, but there are limits to the number of
# mappings which the kernel will allow when you later attempt to run a
# container.
#
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index afcf8ee70..fb1faaa13 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"reflect"
@@ -38,6 +37,13 @@ const (
removeNames
)
+const (
+ volatileFlag = "Volatile"
+ mountLabelFlag = "MountLabel"
+ processLabelFlag = "ProcessLabel"
+ mountOptsFlag = "MountOpts"
+)
+
var (
stores []*store
storesLock sync.Mutex
@@ -632,16 +638,17 @@ type store struct {
// If StoreOptions `options` haven't been fully populated, then DefaultStoreOptions are used.
//
// These defaults observe environment variables:
-// * `STORAGE_DRIVER` for the name of the storage driver to attempt to use
-// * `STORAGE_OPTS` for the string of options to pass to the driver
+// - `STORAGE_DRIVER` for the name of the storage driver to attempt to use
+// - `STORAGE_OPTS` for the string of options to pass to the driver
//
// Note that we do some of this work in a child process. The calling process's
// main() function needs to import our pkg/reexec package and should begin with
// something like this in order to allow us to properly start that child
// process:
-// if reexec.Init() {
-// return
-// }
+//
+// if reexec.Init() {
+// return
+// }
func GetStore(options types.StoreOptions) (Store, error) {
defaultOpts, err := types.Options()
if err != nil {
@@ -1399,11 +1406,10 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
if options.Flags == nil {
options.Flags = make(map[string]interface{})
}
- plabel, _ := options.Flags["ProcessLabel"].(string)
- mlabel, _ := options.Flags["MountLabel"].(string)
- if (plabel == "" && mlabel != "") ||
- (plabel != "" && mlabel == "") {
- return nil, errors.New("processLabel and Mountlabel must either not be specified or both specified")
+ plabel, _ := options.Flags[processLabelFlag].(string)
+ mlabel, _ := options.Flags[mountLabelFlag].(string)
+ if (plabel == "" && mlabel != "") || (plabel != "" && mlabel == "") {
+ return nil, errors.New("ProcessLabel and Mountlabel must either not be specified or both specified")
}
if plabel == "" {
@@ -1411,11 +1417,12 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
if err != nil {
return nil, err
}
- options.Flags["ProcessLabel"] = processLabel
- options.Flags["MountLabel"] = mountLabel
+ mlabel = mountLabel
+ options.Flags[processLabelFlag] = processLabel
+ options.Flags[mountLabelFlag] = mountLabel
}
- clayer, err := rlstore.Create(layer, imageTopLayer, nil, options.Flags["MountLabel"].(string), options.StorageOpt, layerOptions, true)
+ clayer, err := rlstore.Create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true)
if err != nil {
return nil, err
}
@@ -2790,8 +2797,10 @@ func (s *store) Mount(id, mountLabel string) (string, error) {
options.GidMaps = container.GIDMap
options.Options = container.MountOpts()
if !s.disableVolatile {
- if v, found := container.Flags["Volatile"]; found {
- options.Volatile = v.(bool)
+ if v, found := container.Flags[volatileFlag]; found {
+ if b, ok := v.(bool); ok {
+ options.Volatile = b
+ }
}
}
}
@@ -3541,7 +3550,7 @@ func (s *store) FromContainerDirectory(id, file string) ([]byte, error) {
if err != nil {
return nil, err
}
- return ioutil.ReadFile(filepath.Join(dir, file))
+ return os.ReadFile(filepath.Join(dir, file))
}
func (s *store) SetContainerRunDirectoryFile(id, file string, data []byte) error {
@@ -3561,7 +3570,7 @@ func (s *store) FromContainerRunDirectory(id, file string) ([]byte, error) {
if err != nil {
return nil, err
}
- return ioutil.ReadFile(filepath.Join(dir, file))
+ return os.ReadFile(filepath.Join(dir, file))
}
func (s *store) Shutdown(force bool) ([]string, error) {
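The store.go hunks also replace string-literal flag keys with named constants and harden the Volatile lookup: the bare v.(bool) assertion, which panics if the flag ever holds a non-bool, becomes a checked assertion. A minimal sketch of that pattern (the flag values below are made up):

    // A checked type assertion on a value read from map[string]interface{}
    // quietly ignores a wrongly-typed flag instead of panicking.
    package main

    import "fmt"

    const volatileFlag = "Volatile"

    func main() {
        flags := map[string]interface{}{
            volatileFlag: true,
        }

        volatile := false
        if v, found := flags[volatileFlag]; found {
            if b, ok := v.(bool); ok { // was: volatile = v.(bool)
                volatile = b
            }
        }
        fmt.Println("volatile:", volatile)
    }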
diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go
index 88641d424..c54de7635 100644
--- a/vendor/github.com/containers/storage/types/utils.go
+++ b/vendor/github.com/containers/storage/types/utils.go
@@ -3,7 +3,6 @@ package types
import (
"errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"strconv"
@@ -75,7 +74,7 @@ func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, e
return runtimeDir, nil
}
- initCommand, err := ioutil.ReadFile(env.getProcCommandFile())
+ initCommand, err := os.ReadFile(env.getProcCommandFile())
if err != nil || string(initCommand) == "systemd" {
runUserDir := env.getRunUserDir()
if isRootlessRuntimeDirOwner(runUserDir, env) {
diff --git a/vendor/github.com/containers/storage/userns.go b/vendor/github.com/containers/storage/userns.go
index e0e530275..6e200ec12 100644
--- a/vendor/github.com/containers/storage/userns.go
+++ b/vendor/github.com/containers/storage/userns.go
@@ -226,7 +226,7 @@ func (s *store) getAutoUserNS(options *types.AutoUserNsOptions, image *Image) ([
return nil, nil, fmt.Errorf("cannot read mappings: %w", err)
}
- // Look every container that is using a user namespace and store
+ // Look at every container that is using a user namespace and store
// the intervals that are already used.
containers, err := s.Containers()
if err != nil {
diff --git a/vendor/github.com/containers/storage/utils.go b/vendor/github.com/containers/storage/utils.go
index 37d4b79b0..ae9600e68 100644
--- a/vendor/github.com/containers/storage/utils.go
+++ b/vendor/github.com/containers/storage/utils.go
@@ -16,12 +16,12 @@ func GetRootlessRuntimeDir(rootlessUID int) (string, error) {
return types.GetRootlessRuntimeDir(rootlessUID)
}
-// DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers
+// DefaultStoreOptionsAutoDetectUID returns the default storage options for containers
func DefaultStoreOptionsAutoDetectUID() (types.StoreOptions, error) {
return types.DefaultStoreOptionsAutoDetectUID()
}
-// DefaultStoreOptions returns the default storage ops for containers
+// DefaultStoreOptions returns the default storage options for containers
func DefaultStoreOptions(rootless bool, rootlessUID int) (types.StoreOptions, error) {
return types.DefaultStoreOptions(rootless, rootlessUID)
}
diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go
index fd2b3a42b..087320da7 100644
--- a/vendor/github.com/google/go-cmp/cmp/compare.go
+++ b/vendor/github.com/google/go-cmp/cmp/compare.go
@@ -13,21 +13,21 @@
//
// The primary features of cmp are:
//
-// • When the default behavior of equality does not suit the needs of the test,
-// custom equality functions can override the equality operation.
-// For example, an equality function may report floats as equal so long as they
-// are within some tolerance of each other.
+// - When the default behavior of equality does not suit the test's needs,
+// custom equality functions can override the equality operation.
+// For example, an equality function may report floats as equal so long as
+// they are within some tolerance of each other.
//
-// • Types that have an Equal method may use that method to determine equality.
-// This allows package authors to determine the equality operation for the types
-// that they define.
+// - Types with an Equal method may use that method to determine equality.
+// This allows package authors to determine the equality operation
+// for the types that they define.
//
-// • If no custom equality functions are used and no Equal method is defined,
-// equality is determined by recursively comparing the primitive kinds on both
-// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported
-// fields are not compared by default; they result in panics unless suppressed
-// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly
-// compared using the Exporter option.
+// - If no custom equality functions are used and no Equal method is defined,
+// equality is determined by recursively comparing the primitive kinds on
+// both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual,
+// unexported fields are not compared by default; they result in panics
+// unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported)
+// or explicitly compared using the Exporter option.
package cmp
import (
@@ -45,25 +45,25 @@ import (
// Equal reports whether x and y are equal by recursively applying the
// following rules in the given order to x and y and all of their sub-values:
//
-// • Let S be the set of all Ignore, Transformer, and Comparer options that
-// remain after applying all path filters, value filters, and type filters.
-// If at least one Ignore exists in S, then the comparison is ignored.
-// If the number of Transformer and Comparer options in S is greater than one,
-// then Equal panics because it is ambiguous which option to use.
-// If S contains a single Transformer, then use that to transform the current
-// values and recursively call Equal on the output values.
-// If S contains a single Comparer, then use that to compare the current values.
-// Otherwise, evaluation proceeds to the next rule.
+// - Let S be the set of all Ignore, Transformer, and Comparer options that
+// remain after applying all path filters, value filters, and type filters.
+// If at least one Ignore exists in S, then the comparison is ignored.
+// If the number of Transformer and Comparer options in S is non-zero,
+// then Equal panics because it is ambiguous which option to use.
+// If S contains a single Transformer, then use that to transform
+// the current values and recursively call Equal on the output values.
+// If S contains a single Comparer, then use that to compare the current values.
+// Otherwise, evaluation proceeds to the next rule.
//
-// • If the values have an Equal method of the form "(T) Equal(T) bool" or
-// "(T) Equal(I) bool" where T is assignable to I, then use the result of
-// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
-// evaluation proceeds to the next rule.
+// - If the values have an Equal method of the form "(T) Equal(T) bool" or
+// "(T) Equal(I) bool" where T is assignable to I, then use the result of
+// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
+// evaluation proceeds to the next rule.
//
-// • Lastly, try to compare x and y based on their basic kinds.
-// Simple kinds like booleans, integers, floats, complex numbers, strings, and
-// channels are compared using the equivalent of the == operator in Go.
-// Functions are only equal if they are both nil, otherwise they are unequal.
+// - Lastly, try to compare x and y based on their basic kinds.
+// Simple kinds like booleans, integers, floats, complex numbers, strings,
+// and channels are compared using the equivalent of the == operator in Go.
+// Functions are only equal if they are both nil, otherwise they are unequal.
//
// Structs are equal if recursively calling Equal on all fields report equal.
// If a struct contains unexported fields, Equal panics unless an Ignore option
@@ -144,7 +144,7 @@ func rootStep(x, y interface{}) PathStep {
// so that they have the same parent type.
var t reflect.Type
if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
- t = reflect.TypeOf((*interface{})(nil)).Elem()
+ t = anyType
if vx.IsValid() {
vvx := reflect.New(t).Elem()
vvx.Set(vx)
@@ -639,7 +639,9 @@ type dynChecker struct{ curr, next int }
// Next increments the state and reports whether a check should be performed.
//
// Checks occur every Nth function call, where N is a triangular number:
+//
// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
+//
// See https://en.wikipedia.org/wiki/Triangular_number
//
// This sequence ensures that the cost of checks drops significantly as
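The bullet-list rewrites in the go-cmp doc comments track Go 1.19's doc-comment syntax: gofmt now renders indented "- item" lines as list items and blank-line-separated indented text as code blocks, whereas the old "•" bullets were plain text. A tiny sketch of the convention these comments are being converted to (hypothetical function, not from go-cmp):

    package doccomment

    // Equalish reports whether x and y should be treated as equal.
    //
    // The rules are applied in order:
    //   - if both values are nil, they are equal
    //   - otherwise they are compared with == (comparable dynamic types assumed)
    //
    // Typical use:
    //
    //	if Equalish(a, b) {
    //		// values match
    //	}
    func Equalish(x, y any) bool {
        if x == nil && y == nil {
            return true
        }
        return x == y
    }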
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
index bc196b16c..a248e5436 100644
--- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
@@ -127,9 +127,9 @@ var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
// This function returns an edit-script, which is a sequence of operations
// needed to convert one list into the other. The following invariants for
// the edit-script are maintained:
-// • eq == (es.Dist()==0)
-// • nx == es.LenX()
-// • ny == es.LenY()
+// - eq == (es.Dist()==0)
+// - nx == es.LenX()
+// - ny == es.LenY()
//
// This algorithm is not guaranteed to be an optimal solution (i.e., one that
// produces an edit-script with a minimal Levenshtein distance). This algorithm
@@ -169,12 +169,13 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
// A diagonal edge is equivalent to a matching symbol between both X and Y.
// Invariants:
- // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
- // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
+ // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
+ // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
//
// In general:
- // • fwdFrontier.X < revFrontier.X
- // • fwdFrontier.Y < revFrontier.Y
+ // - fwdFrontier.X < revFrontier.X
+ // - fwdFrontier.Y < revFrontier.Y
+ //
// Unless, it is time for the algorithm to terminate.
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
@@ -195,19 +196,21 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
// computing sub-optimal edit-scripts between two lists.
//
// The algorithm is approximately as follows:
- // • Searching for differences switches back-and-forth between
- // a search that starts at the beginning (the top-left corner), and
- // a search that starts at the end (the bottom-right corner). The goal of
- // the search is connect with the search from the opposite corner.
- // • As we search, we build a path in a greedy manner, where the first
- // match seen is added to the path (this is sub-optimal, but provides a
- // decent result in practice). When matches are found, we try the next pair
- // of symbols in the lists and follow all matches as far as possible.
- // • When searching for matches, we search along a diagonal going through
- // through the "frontier" point. If no matches are found, we advance the
- // frontier towards the opposite corner.
- // • This algorithm terminates when either the X coordinates or the
- // Y coordinates of the forward and reverse frontier points ever intersect.
+ // - Searching for differences switches back-and-forth between
+ // a search that starts at the beginning (the top-left corner), and
+ // a search that starts at the end (the bottom-right corner).
+	//     The goal of the search is to connect with the search
+ // from the opposite corner.
+ // - As we search, we build a path in a greedy manner,
+ // where the first match seen is added to the path (this is sub-optimal,
+ // but provides a decent result in practice). When matches are found,
+ // we try the next pair of symbols in the lists and follow all matches
+ // as far as possible.
+	//   - When searching for matches, we search along a diagonal going
+	//     through the "frontier" point. If no matches are found,
+ // we advance the frontier towards the opposite corner.
+ // - This algorithm terminates when either the X coordinates or the
+ // Y coordinates of the forward and reverse frontier points ever intersect.
// This algorithm is correct even if searching only in the forward direction
// or in the reverse direction. We do both because it is commonly observed
@@ -389,6 +392,7 @@ type point struct{ X, Y int }
func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
+//
// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
func zigzag(x int) int {
if x&1 != 0 {
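The hunk above cuts off before the body is complete; one way to realize the documented mapping [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] is the sketch below (hypothetical name, equivalent in spirit to the vendored zigzag).

package main

import "fmt"

// zigzagSketch maps 0,1,2,3,4,5,... to 0,-1,+1,-2,+2,-3,... so that small
// indices fan out around zero.
func zigzagSketch(x int) int {
	if x&1 != 0 {
		x = ^x // odd inputs land on the negative side
	}
	return x >> 1
}

func main() {
	for i := 0; i < 6; i++ {
		fmt.Print(zigzagSketch(i), " ") // 0 -1 1 -2 2 -3
	}
	fmt.Println()
}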
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
deleted file mode 100644
index 9147a2997..000000000
--- a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package value
-
-import (
- "math"
- "reflect"
-)
-
-// IsZero reports whether v is the zero value.
-// This does not rely on Interface and so can be used on unexported fields.
-func IsZero(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Bool:
- return v.Bool() == false
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return math.Float64bits(v.Float()) == 0
- case reflect.Complex64, reflect.Complex128:
- return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0
- case reflect.String:
- return v.String() == ""
- case reflect.UnsafePointer:
- return v.Pointer() == 0
- case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
- return v.IsNil()
- case reflect.Array:
- for i := 0; i < v.Len(); i++ {
- if !IsZero(v.Index(i)) {
- return false
- }
- }
- return true
- case reflect.Struct:
- for i := 0; i < v.NumField(); i++ {
- if !IsZero(v.Field(i)) {
- return false
- }
- }
- return true
- }
- return false
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go
index e57b9eb53..1f9ca9c48 100644
--- a/vendor/github.com/google/go-cmp/cmp/options.go
+++ b/vendor/github.com/google/go-cmp/cmp/options.go
@@ -33,6 +33,7 @@ type Option interface {
}
// applicableOption represents the following types:
+//
// Fundamental: ignore | validator | *comparer | *transformer
// Grouping: Options
type applicableOption interface {
@@ -43,6 +44,7 @@ type applicableOption interface {
}
// coreOption represents the following types:
+//
// Fundamental: ignore | validator | *comparer | *transformer
// Filters: *pathFilter | *valuesFilter
type coreOption interface {
@@ -336,9 +338,9 @@ func (tr transformer) String() string {
// both implement T.
//
// The equality function must be:
-// • Symmetric: equal(x, y) == equal(y, x)
-// • Deterministic: equal(x, y) == equal(x, y)
-// • Pure: equal(x, y) does not modify x or y
+// - Symmetric: equal(x, y) == equal(y, x)
+// - Deterministic: equal(x, y) == equal(x, y)
+// - Pure: equal(x, y) does not modify x or y
func Comparer(f interface{}) Option {
v := reflect.ValueOf(f)
if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
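Any function handed to Comparer has to satisfy the three properties listed above. A small usage sketch, assuming an approximate float comparison is what the caller wants:

package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Symmetric, deterministic, and pure: swapping x and y, or calling it
	// twice, always yields the same answer, and nothing is mutated.
	approx := cmp.Comparer(func(x, y float64) bool {
		return math.Abs(x-y) < 1e-9
	})

	x, y := 0.1, 0.2
	fmt.Println(cmp.Equal(x+y, 0.3, approx)) // true, although x+y != 0.3 exactly
	fmt.Println(cmp.Equal(x+y, 0.3))         // false without the custom comparer
}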
@@ -430,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option {
}
// Result represents the comparison result for a single node and
-// is provided by cmp when calling Result (see Reporter).
+// is provided by cmp when calling Report (see Reporter).
type Result struct {
_ [0]func() // Make Result incomparable
flags resultFlags
diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go
index c71003463..a0a588502 100644
--- a/vendor/github.com/google/go-cmp/cmp/path.go
+++ b/vendor/github.com/google/go-cmp/cmp/path.go
@@ -41,13 +41,13 @@ type PathStep interface {
// The type of each valid value is guaranteed to be identical to Type.
//
// In some cases, one or both may be invalid or have restrictions:
- // • For StructField, both are not interface-able if the current field
- // is unexported and the struct type is not explicitly permitted by
- // an Exporter to traverse unexported fields.
- // • For SliceIndex, one may be invalid if an element is missing from
- // either the x or y slice.
- // • For MapIndex, one may be invalid if an entry is missing from
- // either the x or y map.
+ // - For StructField, both are not interface-able if the current field
+ // is unexported and the struct type is not explicitly permitted by
+ // an Exporter to traverse unexported fields.
+ // - For SliceIndex, one may be invalid if an element is missing from
+ // either the x or y slice.
+ // - For MapIndex, one may be invalid if an entry is missing from
+ // either the x or y map.
//
// The provided values must not be mutated.
Values() (vx, vy reflect.Value)
@@ -94,6 +94,7 @@ func (pa Path) Index(i int) PathStep {
// The simplified path only contains struct field accesses.
//
// For example:
+//
// MyMap.MySlices.MyField
func (pa Path) String() string {
var ss []string
@@ -108,6 +109,7 @@ func (pa Path) String() string {
// GoString returns the path to a specific node using Go syntax.
//
// For example:
+//
// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
func (pa Path) GoString() string {
var ssPre, ssPost []string
@@ -159,7 +161,7 @@ func (ps pathStep) String() string {
if ps.typ == nil {
return "<nil>"
}
- s := ps.typ.String()
+ s := value.TypeString(ps.typ, false)
if s == "" || strings.ContainsAny(s, "{}\n") {
return "root" // Type too simple or complex to print
}
@@ -282,7 +284,7 @@ type typeAssertion struct {
func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
-func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
+func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) }
// Transform is a transformation from the parent type to the current type.
type Transform struct{ *transform }
diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go
index 1ef65ac1d..2050bf6b4 100644
--- a/vendor/github.com/google/go-cmp/cmp/report_compare.go
+++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go
@@ -7,8 +7,6 @@ package cmp
import (
"fmt"
"reflect"
-
- "github.com/google/go-cmp/cmp/internal/value"
)
// numContextRecords is the number of surrounding equal records to print.
@@ -117,7 +115,7 @@ func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out
// For leaf nodes, format the value based on the reflect.Values alone.
// As a special case, treat equal []byte as a leaf nodes.
- isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == reflect.TypeOf(byte(0))
+ isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType
isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
if v.MaxDepth == 0 || isEqualBytes {
switch opts.DiffMode {
@@ -248,11 +246,11 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, pt
var isZero bool
switch opts.DiffMode {
case diffIdentical:
- isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY)
+ isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero()
case diffRemoved:
- isZero = value.IsZero(r.Value.ValueX)
+ isZero = r.Value.ValueX.IsZero()
case diffInserted:
- isZero = value.IsZero(r.Value.ValueY)
+ isZero = r.Value.ValueY.IsZero()
}
if isZero {
continue
diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
index 287b89358..2ab41fad3 100644
--- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go
+++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
@@ -16,6 +16,13 @@ import (
"github.com/google/go-cmp/cmp/internal/value"
)
+var (
+ anyType = reflect.TypeOf((*interface{})(nil)).Elem()
+ stringType = reflect.TypeOf((*string)(nil)).Elem()
+ bytesType = reflect.TypeOf((*[]byte)(nil)).Elem()
+ byteType = reflect.TypeOf((*byte)(nil)).Elem()
+)
+
type formatValueOptions struct {
// AvoidStringer controls whether to avoid calling custom stringer
// methods like error.Error or fmt.Stringer.String.
@@ -184,7 +191,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
}
for i := 0; i < v.NumField(); i++ {
vv := v.Field(i)
- if value.IsZero(vv) {
+ if vv.IsZero() {
continue // Elide fields with zero values
}
if len(list) == maxLen {
@@ -205,7 +212,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
}
// Check whether this is a []byte of text data.
- if t.Elem() == reflect.TypeOf(byte(0)) {
+ if t.Elem() == byteType {
b := v.Bytes()
isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go
index 68b5c1ae1..23e444f62 100644
--- a/vendor/github.com/google/go-cmp/cmp/report_slices.go
+++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go
@@ -104,7 +104,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
case t.Kind() == reflect.String:
sx, sy = vx.String(), vy.String()
isString = true
- case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)):
+ case t.Kind() == reflect.Slice && t.Elem() == byteType:
sx, sy = string(vx.Bytes()), string(vy.Bytes())
isString = true
case t.Kind() == reflect.Array:
@@ -147,7 +147,10 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
})
efficiencyLines := float64(esLines.Dist()) / float64(len(esLines))
efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes))
- isPureLinedText = efficiencyLines < 4*efficiencyBytes
+ quotedLength := len(strconv.Quote(sx + sy))
+ unquotedLength := len(sx) + len(sy)
+ escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength)
+ isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1
}
}
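The added escapeExpansionRatio term prefers the line-by-line layout whenever quoting the combined strings would grow them by more than roughly 10%. The ratio itself is easy to see in isolation (standalone sketch, not the reporter code path):

package main

import (
	"fmt"
	"strconv"
)

// escapeExpansion reports how much longer a string gets once quoted and
// escaped; a large ratio means the escaped single-line form is hard to read.
func escapeExpansion(s string) float64 {
	return float64(len(strconv.Quote(s))) / float64(len(s))
}

func main() {
	fmt.Println(escapeExpansion("plain text line"))     // ~1.13 (just the added quotes)
	fmt.Println(escapeExpansion("tabs\tand\nnewlines")) // ~1.24 (escapes inflate the output)
}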
@@ -171,12 +174,13 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
// differences in a string literal. This format is more readable,
// but has edge-cases where differences are visually indistinguishable.
// This format is avoided under the following conditions:
- // • A line starts with `"""`
- // • A line starts with "..."
- // • A line contains non-printable characters
- // • Adjacent different lines differ only by whitespace
+ // - A line starts with `"""`
+ // - A line starts with "..."
+ // - A line contains non-printable characters
+ // - Adjacent different lines differ only by whitespace
//
// For example:
+ //
// """
// ... // 3 identical lines
// foo
@@ -231,7 +235,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
switch t.Kind() {
case reflect.String:
- if t != reflect.TypeOf(string("")) {
+ if t != stringType {
out = opts.FormatType(t, out)
}
case reflect.Slice:
@@ -326,12 +330,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
switch t.Kind() {
case reflect.String:
out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
- if t != reflect.TypeOf(string("")) {
+ if t != stringType {
out = opts.FormatType(t, out)
}
case reflect.Slice:
out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
- if t != reflect.TypeOf([]byte(nil)) {
+ if t != bytesType {
out = opts.FormatType(t, out)
}
}
@@ -446,7 +450,6 @@ func (opts formatOptions) formatDiffSlice(
// {NumIdentical: 3},
// {NumInserted: 1},
// ]
-//
func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
var prevMode byte
lastStats := func(mode byte) *diffStats {
@@ -503,7 +506,6 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats)
// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
// {NumIdentical: 63},
// ]
-//
func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
groups, groupsOrig := groups[:0], groups
for i, ds := range groupsOrig {
@@ -548,7 +550,6 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat
// {NumRemoved: 9},
// {NumIdentical: 64}, // incremented by 10
// ]
-//
func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
var ix, iy int // indexes into sequence x and y
for i, ds := range groups {
diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go
index 0fd46d7ff..388fcf571 100644
--- a/vendor/github.com/google/go-cmp/cmp/report_text.go
+++ b/vendor/github.com/google/go-cmp/cmp/report_text.go
@@ -393,6 +393,7 @@ func (s diffStats) Append(ds diffStats) diffStats {
// String prints a humanly-readable summary of coalesced records.
//
// Example:
+//
// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
func (s diffStats) String() string {
var ss []string
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index ad5c63a82..2d6b01077 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -17,6 +17,12 @@ This package provides various compression algorithms.
# changelog
+* July 21, 2022 (v1.15.9)
+
+ * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
+ * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
+ * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643
+
* July 13, 2022 (v1.15.8)
* gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
index f8435998e..f00da5b21 100644
--- a/vendor/github.com/klauspost/compress/flate/deflate.go
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -131,7 +131,8 @@ func (d *compressor) fillDeflate(b []byte) int {
s := d.state
if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
// shift the window by windowSize
- copy(d.window[:], d.window[windowSize:2*windowSize])
+ //copy(d.window[:], d.window[windowSize:2*windowSize])
+ *(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:])
s.index -= windowSize
d.windowEnd -= windowSize
if d.blockStart >= windowSize {
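This hunk, and several later ones (fast_encoder.go, huffman_bit_writer.go, the huff0 decoders, and the zstd dictionary resets), swap a copy over a fixed-size region for a Go 1.17-style slice-to-array-pointer conversion, which lets the compiler emit a single length check plus a fixed-size move. A minimal sketch of the pattern, with a stand-in windowSize rather than the package's constant:

package main

import "fmt"

const windowSize = 4 // stand-in; the real deflate window is far larger

func main() {
	window := []byte{1, 2, 3, 4, 5, 6, 7, 8}

	// Before: copy(window[:windowSize], window[windowSize:])
	// After: convert each slice to a *[windowSize]byte and assign the arrays.
	// The conversion panics if a slice is shorter than windowSize, so the
	// length check happens once, up front.
	*(*[windowSize]byte)(window) = *(*[windowSize]byte)(window[windowSize:])

	fmt.Println(window) // [5 6 7 8 5 6 7 8]
}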
diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go
index 71c75a065..bb36351a5 100644
--- a/vendor/github.com/klauspost/compress/flate/dict_decoder.go
+++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go
@@ -7,19 +7,19 @@ package flate
// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
// LZ77 decompresses data through sequences of two forms of commands:
//
-// * Literal insertions: Runs of one or more symbols are inserted into the data
-// stream as is. This is accomplished through the writeByte method for a
-// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
-// Any valid stream must start with a literal insertion if no preset dictionary
-// is used.
+// - Literal insertions: Runs of one or more symbols are inserted into the data
+// stream as is. This is accomplished through the writeByte method for a
+// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
+// Any valid stream must start with a literal insertion if no preset dictionary
+// is used.
//
-// * Backward copies: Runs of one or more symbols are copied from previously
-// emitted data. Backward copies come as the tuple (dist, length) where dist
-// determines how far back in the stream to copy from and length determines how
-// many bytes to copy. Note that it is valid for the length to be greater than
-// the distance. Since LZ77 uses forward copies, that situation is used to
-// perform a form of run-length encoding on repeated runs of symbols.
-// The writeCopy and tryWriteCopy are used to implement this command.
+// - Backward copies: Runs of one or more symbols are copied from previously
+// emitted data. Backward copies come as the tuple (dist, length) where dist
+// determines how far back in the stream to copy from and length determines how
+// many bytes to copy. Note that it is valid for the length to be greater than
+// the distance. Since LZ77 uses forward copies, that situation is used to
+// perform a form of run-length encoding on repeated runs of symbols.
+// The writeCopy and tryWriteCopy are used to implement this command.
//
// For performance reasons, this implementation performs little to no sanity
// checks about the arguments. As such, the invariants documented for each
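The backward-copy command described above is the interesting case: because the length may exceed the distance, copying forward one byte at a time turns an overlapping copy into run-length repetition. A minimal sketch of that effect (not the dictDecoder implementation):

package main

import "fmt"

// appendCopy appends length bytes copied from dist bytes back in out.
// Because the copy runs forward one byte at a time, a length larger than
// dist re-reads bytes written by this same copy, i.e. run-length repetition.
func appendCopy(out []byte, dist, length int) []byte {
	start := len(out) - dist
	for i := 0; i < length; i++ {
		out = append(out, out[start+i])
	}
	return out
}

func main() {
	out := []byte("ab")
	out = appendCopy(out, 2, 6) // copy "ab" with length > distance
	fmt.Println(string(out))    // abababab
}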
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
index f781aaa62..cd77a2cc4 100644
--- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -104,7 +104,8 @@ func (e *fastGen) addBlock(src []byte) int32 {
}
// Move down
offset := int32(len(e.hist)) - maxMatchOffset
- copy(e.hist[0:maxMatchOffset], e.hist[offset:])
+ // copy(e.hist[0:maxMatchOffset], e.hist[offset:])
+ *(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:])
e.cur += offset
e.hist = e.hist[:maxMatchOffset]
}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
index 40ef45c2f..89a5dd89f 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -265,9 +265,9 @@ func (w *huffmanBitWriter) writeBytes(bytes []byte) {
// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
// information. Code badCode is an end marker
//
-// numLiterals The number of literals in literalEncoding
-// numOffsets The number of offsets in offsetEncoding
-// litenc, offenc The literal and offset encoder to use
+// numLiterals The number of literals in literalEncoding
+// numOffsets The number of offsets in offsetEncoding
+// litenc, offenc The literal and offset encoder to use
func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
for i := range w.codegenFreq {
w.codegenFreq[i] = 0
@@ -460,9 +460,9 @@ func (w *huffmanBitWriter) writeOutBits() {
// Write the header of a dynamic Huffman block to the output stream.
//
-// numLiterals The number of literals specified in codegen
-// numOffsets The number of offsets specified in codegen
-// numCodegens The number of codegens used in codegen
+// numLiterals The number of literals specified in codegen
+// numOffsets The number of offsets specified in codegen
+// numCodegens The number of codegens used in codegen
func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
if w.err != nil {
return
@@ -790,9 +790,11 @@ func (w *huffmanBitWriter) fillTokens() {
// and offsetEncoding.
// The number of literal and offset tokens is returned.
func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
- copy(w.literalFreq[:], t.litHist[:])
- copy(w.literalFreq[256:], t.extraHist[:])
- copy(w.offsetFreq[:], t.offHist[:offsetCodeCount])
+ //copy(w.literalFreq[:], t.litHist[:])
+ *(*[256]uint16)(w.literalFreq[:]) = t.litHist
+ //copy(w.literalFreq[256:], t.extraHist[:])
+ *(*[32]uint16)(w.literalFreq[256:]) = t.extraHist
+ w.offsetFreq = t.offHist
if t.n == 0 {
return
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
index 5ac144f28..be7b58b47 100644
--- a/vendor/github.com/klauspost/compress/flate/huffman_code.go
+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -168,13 +168,18 @@ func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
// The cases of 0, 1, and 2 literals are handled by special case code.
//
// list An array of the literals with non-zero frequencies
-// and their associated frequencies. The array is in order of increasing
-// frequency, and has as its last element a special element with frequency
-// MaxInt32
+//
+// and their associated frequencies. The array is in order of increasing
+// frequency, and has as its last element a special element with frequency
+// MaxInt32
+//
// maxBits The maximum number of bits that should be used to encode any literal.
-// Must be less than 16.
+//
+// Must be less than 16.
+//
// return An integer array in which array[i] indicates the number of literals
-// that should be encoded in i bits.
+//
+// that should be encoded in i bits.
func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
if maxBits >= maxBitsLimit {
panic("flate: maxBits too large")
diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go
index 4b97576bd..ef6339d95 100644
--- a/vendor/github.com/klauspost/compress/flate/level5.go
+++ b/vendor/github.com/klauspost/compress/flate/level5.go
@@ -191,14 +191,21 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
// Try to locate a better match by checking the end of best match...
if sAt := s + l; l < 30 && sAt < sLimit {
+ // Allow some bytes at the beginning to mismatch.
+ // Sweet spot is 2/3 bytes depending on input.
+			// 3 is only a little better when it helps, but sometimes a lot worse.
+			// The skipped bytes are tested in Extend backwards,
+			// and are still picked up as part of the match if they do match.
+ const skipBeginning = 2
eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
- // Test current
- t2 := eLong - e.cur - l
- off := s - t2
+ t2 := eLong - e.cur - l + skipBeginning
+ s2 := s + skipBeginning
+ off := s2 - t2
if t2 >= 0 && off < maxMatchOffset && off > 0 {
- if l2 := e.matchlenLong(s, t2, src); l2 > l {
+ if l2 := e.matchlenLong(s2, t2, src); l2 > l {
t = t2
l = l2
+ s = s2
}
}
}
diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go
index 62888edf3..85e4b2095 100644
--- a/vendor/github.com/klauspost/compress/flate/level6.go
+++ b/vendor/github.com/klauspost/compress/flate/level6.go
@@ -213,24 +213,33 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
// Try to locate a better match by checking the end-of-match...
if sAt := s + l; sAt < sLimit {
+ // Allow some bytes at the beginning to mismatch.
+ // Sweet spot is 2/3 bytes depending on input.
+			// 3 is only a little better when it helps, but sometimes a lot worse.
+			// The skipped bytes are tested in Extend backwards,
+			// and are still picked up as part of the match if they do match.
+ const skipBeginning = 2
eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)]
// Test current
- t2 := eLong.Cur.offset - e.cur - l
- off := s - t2
+ t2 := eLong.Cur.offset - e.cur - l + skipBeginning
+ s2 := s + skipBeginning
+ off := s2 - t2
if off < maxMatchOffset {
if off > 0 && t2 >= 0 {
- if l2 := e.matchlenLong(s, t2, src); l2 > l {
+ if l2 := e.matchlenLong(s2, t2, src); l2 > l {
t = t2
l = l2
+ s = s2
}
}
// Test next:
- t2 = eLong.Prev.offset - e.cur - l
- off := s - t2
+ t2 = eLong.Prev.offset - e.cur - l + skipBeginning
+ off := s2 - t2
if off > 0 && off < maxMatchOffset && t2 >= 0 {
- if l2 := e.matchlenLong(s, t2, src); l2 > l {
+ if l2 := e.matchlenLong(s2, t2, src); l2 > l {
t = t2
l = l2
+ s = s2
}
}
}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index c0c48bd70..42a237eac 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -763,17 +763,20 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[0][:])
- copy(out[dstEvery:], buf[1][:])
- copy(out[dstEvery*2:], buf[2][:])
- copy(out[dstEvery*3:], buf[3][:])
- out = out[bufoff:]
- decoded += bufoff * 4
// There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
+ if len(out)-bufoff < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
}
}
if off > 0 {
@@ -997,17 +1000,22 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[0][:])
- copy(out[dstEvery:], buf[1][:])
- copy(out[dstEvery*2:], buf[2][:])
- copy(out[dstEvery*3:], buf[3][:])
- out = out[bufoff:]
- decoded += bufoff * 4
// There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
+ if len(out)-bufoff < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
+
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ // copy(out[dstEvery*3:], buf[3][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
}
}
if off > 0 {
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
index 9f3e9f79e..ba7e8e6b0 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -14,12 +14,14 @@ import (
// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog > 8.
+//
//go:noescape
func decompress4x_main_loop_amd64(ctx *decompress4xContext)
// decompress4x_8b_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog <= 8 which decodes 4 entries
// per loop.
+//
//go:noescape
func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
@@ -145,11 +147,13 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress1X when tablelog > 8.
+//
//go:noescape
func decompress1x_main_loop_amd64(ctx *decompress1xContext)
// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation
// of Decompress1X when tablelog > 8.
+//
//go:noescape
func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
index dd1a5aecd..8d2187a2c 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
@@ -1,7 +1,6 @@
// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT.
//go:build amd64 && !appengine && !noasm && gc
-// +build amd64,!appengine,!noasm,gc
// func decompress4x_main_loop_amd64(ctx *decompress4xContext)
TEXT ·decompress4x_main_loop_amd64(SB), $0-8
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
index 4f6f37cb2..908c17de6 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
@@ -122,17 +122,21 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[0][:])
- copy(out[dstEvery:], buf[1][:])
- copy(out[dstEvery*2:], buf[2][:])
- copy(out[dstEvery*3:], buf[3][:])
- out = out[bufoff:]
- decoded += bufoff * 4
// There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
+ if len(out)-bufoff < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ //copy(out[dstEvery*3:], buf[3][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
}
}
if off > 0 {
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
index 511bba65d..298c4f8e9 100644
--- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
+++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
@@ -18,6 +18,7 @@ func load64(b []byte, i int) uint64 {
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
+//
// dst is long enough to hold the encoded bytes
// 1 <= len(lit) && len(lit) <= 65536
func emitLiteral(dst, lit []byte) int {
@@ -42,6 +43,7 @@ func emitLiteral(dst, lit []byte) int {
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
+//
// dst is long enough to hold the encoded bytes
// 1 <= offset && offset <= 65535
// 4 <= length && length <= 65535
@@ -89,6 +91,7 @@ func emitCopy(dst []byte, offset, length int) int {
// src[i:i+k-j] and src[j:k] have the same contents.
//
// It assumes that:
+//
// 0 <= i && i < j && j <= len(src)
func extendMatch(src []byte, i, j int) int {
for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
@@ -105,8 +108,9 @@ func hash(u, shift uint32) uint32 {
// been written.
//
// It also assumes that:
+//
// len(dst) >= MaxEncodedLen(len(src)) &&
-// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
// The table element type is uint16, as s < sLimit and sLimit < len(src)
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index beb7fa872..65b38abed 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -12,6 +12,8 @@ The `zstd` package is provided as open source software using a Go standard licen
Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
+For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).
+
## Installation
Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 7eed729be..f52d1aed6 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -10,7 +10,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"sync"
@@ -651,7 +650,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
buf.Write(in)
- ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
+ os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
}
return nil
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
index 2ad02070d..176788f25 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -7,7 +7,6 @@ package zstd
import (
"fmt"
"io"
- "io/ioutil"
)
type byteBuffer interface {
@@ -124,7 +123,7 @@ func (r *readerWrapper) readByte() (byte, error) {
}
func (r *readerWrapper) skipN(n int64) error {
- n2, err := io.CopyN(ioutil.Discard, r.r, n)
+ n2, err := io.CopyN(io.Discard, r.r, n)
if n2 != n {
err = io.ErrUnexpectedEOF
}
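This hunk and the blockdec.go change above are part of the io/ioutil deprecation cleanup from Go 1.16: io.Discard and os.WriteFile are drop-in replacements. A small sketch of the equivalents (the file name is an arbitrary example):

package main

import (
	"io"
	"os"
	"strings"
)

func main() {
	// io.Discard replaces ioutil.Discard (same value, new home since Go 1.16).
	if _, err := io.CopyN(io.Discard, strings.NewReader("skip me"), 4); err != nil {
		panic(err)
	}

	// os.WriteFile replaces ioutil.WriteFile with the same signature.
	if err := os.WriteFile("example.txt", []byte("data\n"), 0o644); err != nil {
		panic(err)
	}
}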
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index d212f4737..6104eb793 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -312,6 +312,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
// Grab a block decoder and frame decoder.
block := <-d.decoders
frame := block.localFrame
+ initialSize := len(dst)
defer func() {
if debugDecoder {
printf("re-adding decoder: %p", block)
@@ -354,7 +355,16 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
return dst, ErrWindowSizeExceeded
}
if frame.FrameContentSize != fcsUnknown {
- if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+ if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) {
+ if debugDecoder {
+ println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst))
+ }
+ return dst, ErrDecoderSizeExceeded
+ }
+ if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) {
+ if debugDecoder {
+ println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst))
+ }
return dst, ErrDecoderSizeExceeded
}
if cap(dst)-len(dst) < int(frame.FrameContentSize) {
@@ -364,7 +374,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
}
}
- if cap(dst) == 0 {
+ if cap(dst) == 0 && !d.o.limitToCap {
// Allocate len(input) * 2 by default if nothing is provided
// and we didn't get frame content size.
size := len(input) * 2
@@ -382,6 +392,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
if err != nil {
return dst, err
}
+ if uint64(len(dst)-initialSize) > d.o.maxDecodedSize {
+ return dst, ErrDecoderSizeExceeded
+ }
if len(frame.bBuf) == 0 {
if debugDecoder {
println("frame dbuf empty")
@@ -852,6 +865,10 @@ decodeStream:
}
}
if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
+ if debugDecoder {
+ println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize)
+ }
+
err = ErrDecoderSizeExceeded
}
if err != nil {
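The added checks compare the output produced by this DecodeAll call, len(dst)-initialSize, against the configured limit rather than the total length of dst, so pre-existing bytes in dst no longer count against the budget. A sketch of the limit these checks enforce, using the existing WithDecoderMaxMemory option:

package main

import (
	"errors"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil)
	compressed := enc.EncodeAll(make([]byte, 64<<10), nil) // 64 KiB of zeros
	enc.Close()

	// Cap the output of each DecodeAll call at 4 KiB.
	dec, err := zstd.NewReader(nil, zstd.WithDecoderMaxMemory(4<<10))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	_, err = dec.DecodeAll(compressed, nil)
	fmt.Println(errors.Is(err, zstd.ErrDecoderSizeExceeded)) // expected: true
}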
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
index c70e6fa0f..666c2715f 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -20,6 +20,7 @@ type decoderOptions struct {
maxWindowSize uint64
dicts []dict
ignoreChecksum bool
+ limitToCap bool
}
func (o *decoderOptions) setDefault() {
@@ -114,6 +115,17 @@ func WithDecoderMaxWindow(size uint64) DOption {
}
}
+// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes,
+// or any size set in WithDecoderMaxMemory.
+// This can be used to limit decoding to a specific maximum output size.
+// Disabled by default.
+func WithDecodeAllCapLimit(b bool) DOption {
+ return func(o *decoderOptions) error {
+ o.limitToCap = b
+ return nil
+ }
+}
+
// IgnoreChecksum allows to forcibly ignore checksum checking.
func IgnoreChecksum(b bool) DOption {
return func(o *decoderOptions) error {
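A usage sketch of the new option, assuming the caller wants DecodeAll to stop at the capacity reserved in dst:

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil)
	compressed := enc.EncodeAll([]byte("hello zstd"), nil)
	enc.Close()

	// With the cap limit enabled, DecodeAll will not grow the result past
	// cap(dst)-len(dst); it returns ErrDecoderSizeExceeded instead.
	dec, err := zstd.NewReader(nil, zstd.WithDecodeAllCapLimit(true))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	dst := make([]byte, 0, 64) // reserve at most 64 bytes of output
	out, err := dec.DecodeAll(compressed, dst)
	fmt.Println(string(out), err) // "hello zstd" <nil>
}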
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
index c769f6941..d70e3fd3d 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_better.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -416,15 +416,23 @@ encodeLoop:
// Try to find a better match by searching for a long match at the end of the current best match
if s+matched < sLimit {
+ // Allow some bytes at the beginning to mismatch.
+ // Sweet spot is around 3 bytes, but depends on input.
+ // The skipped bytes are tested in Extend backwards,
+			// and are still picked up as part of the match if they do match.
+ const skipBeginning = 3
+
nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
- cv := load3232(src, s)
+ s2 := s + skipBeginning
+ cv := load3232(src, s2)
candidateL := e.longTable[nextHashL]
- coffsetL := candidateL.offset - e.cur - matched
- if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+ coffsetL := candidateL.offset - e.cur - matched + skipBeginning
+ if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
// Found a long match, at least 4 bytes.
- matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
if matchedNext > matched {
t = coffsetL
+ s = s2
matched = matchedNext
if debugMatches {
println("long match at end-of-match")
@@ -434,12 +442,13 @@ encodeLoop:
// Check prev long...
if true {
- coffsetL = candidateL.prev - e.cur - matched
- if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+ coffsetL = candidateL.prev - e.cur - matched + skipBeginning
+ if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
// Found a long match, at least 4 bytes.
- matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
if matchedNext > matched {
t = coffsetL
+ s = s2
matched = matchedNext
if debugMatches {
println("prev long match at end-of-match")
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index 7ff0c64fa..1f4a9a245 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -1103,7 +1103,8 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
}
if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
- copy(e.longTable[:], e.dictLongTable)
+ //copy(e.longTable[:], e.dictLongTable)
+ e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable)
for i := range e.longTableShardDirty {
e.longTableShardDirty[i] = false
}
@@ -1114,7 +1115,9 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
continue
}
- copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+ // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+ *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:])
+
e.longTableShardDirty[i] = false
}
}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index f51ab529a..181edc02b 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -871,7 +871,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
const shardCnt = tableShardCnt
const shardSize = tableShardSize
if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
- copy(e.table[:], e.dictTable)
+ //copy(e.table[:], e.dictTable)
+ e.table = *(*[tableSize]tableEntry)(e.dictTable)
for i := range e.tableShardDirty {
e.tableShardDirty[i] = false
}
@@ -883,7 +884,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
continue
}
- copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+ //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+ *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:])
e.tableShardDirty[i] = false
}
e.allDirty = false
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 9568a4ba3..1559a2038 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -353,12 +353,23 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
// Store input length, so we only check new data.
crcStart := len(dst)
d.history.decoders.maxSyncLen = 0
+ if d.o.limitToCap {
+ d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst))
+ }
if d.FrameContentSize != fcsUnknown {
- d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+ if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen {
+ d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+ }
if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
+ if debugDecoder {
+ println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize)
+ }
return dst, ErrDecoderSizeExceeded
}
- if uint64(cap(dst)) < d.history.decoders.maxSyncLen {
+ if debugDecoder {
+ println("maxSyncLen:", d.history.decoders.maxSyncLen)
+ }
+ if !d.o.limitToCap && uint64(cap(dst)-len(dst)) < d.history.decoders.maxSyncLen {
// Alloc for output
dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
copy(dst2, dst)
@@ -378,7 +389,13 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if err != nil {
break
}
- if uint64(len(d.history.b)) > d.o.maxDecodedSize {
+ if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize {
+ println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize)
+ err = ErrDecoderSizeExceeded
+ break
+ }
+ if d.o.limitToCap && len(d.history.b) > cap(dst) {
+ println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst))
err = ErrDecoderSizeExceeded
break
}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
index da32b4420..bcde39869 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
@@ -1,7 +1,6 @@
// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT.
//go:build !appengine && !noasm && gc && !noasm
-// +build !appengine,!noasm,gc,!noasm
// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
TEXT ·buildDtable_asm(SB), $0-24
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
index 7598c1018..1c704d30c 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
@@ -32,18 +32,22 @@ type decodeSyncAsmContext struct {
// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
+//
//go:noescape
func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
+//
//go:noescape
func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
+//
//go:noescape
func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
+//
//go:noescape
func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
@@ -201,20 +205,24 @@ const errorNotEnoughSpace = 5
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
+//
//go:noescape
func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
+//
//go:noescape
func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
//go:noescape
func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
//go:noescape
func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
@@ -308,10 +316,12 @@ type executeAsmContext struct {
// Returns false if a match offset is too big.
//
// Please refer to seqdec_generic.go for the reference implementation.
+//
//go:noescape
func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
// Same as above, but with safe memcopies
+//
//go:noescape
func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
index 27e76774c..52e5703c2 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -1,7 +1,6 @@
// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT.
//go:build !appengine && !noasm && gc && !noasm
-// +build !appengine,!noasm,gc,!noasm
// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: CMOV
diff --git a/vendor/github.com/mistifyio/go-zfs/.gitignore b/vendor/github.com/mistifyio/go-zfs/.gitignore
deleted file mode 100644
index 8000dd9db..000000000
--- a/vendor/github.com/mistifyio/go-zfs/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-.vagrant
diff --git a/vendor/github.com/mistifyio/go-zfs/.travis.yml b/vendor/github.com/mistifyio/go-zfs/.travis.yml
deleted file mode 100644
index acbd39cef..000000000
--- a/vendor/github.com/mistifyio/go-zfs/.travis.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-language: go
-dist: trusty
-sudo: required
-cache:
- directories:
- - $HOME/.ccache
- - $HOME/zfs
-
-branches:
- only:
- - master
-
-env:
- - rel=0.6.5.11
- - rel=0.7.6
-
-go:
- - "1.10.x"
- - master
-
-before_install:
- - export MAKEFLAGS=-j$(($(grep -c '^processor' /proc/cpuinfo) * 2 + 1))
- - export PATH=/usr/lib/ccache:$PATH
- - go get github.com/alecthomas/gometalinter
- - gometalinter --install --update
- - sudo apt-get update -y && sudo apt-get install -y libattr1-dev libblkid-dev linux-headers-$(uname -r) tree uuid-dev
- - mkdir -p $HOME/zfs
- - cd $HOME/zfs
- - [[ -d spl-$rel.tar.gz ]] || curl -L https://github.com/zfsonlinux/zfs/releases/download/zfs-$rel/spl-$rel.tar.gz | tar xz
- - [[ -d zfs-$rel.tar.gz ]] || curl -L https://github.com/zfsonlinux/zfs/releases/download/zfs-$rel/zfs-$rel.tar.gz | tar xz
- - (cd spl-$rel && ./configure --prefix=/usr && make && sudo make install)
- - (cd zfs-$rel && ./configure --prefix=/usr && make && sudo make install)
- - sudo modprobe zfs
- - cd $TRAVIS_BUILD_DIR
-
-script:
- - sudo -E $(which go) test -v ./...
- - gometalinter --vendor --vendored-linters ./... || true
- - gometalinter --errors --vendor --vendored-linters ./...
-
-notifications:
- email: false
- irc: "chat.freenode.net#cerana"
diff --git a/vendor/github.com/mistifyio/go-zfs/Vagrantfile b/vendor/github.com/mistifyio/go-zfs/Vagrantfile
deleted file mode 100644
index 3bd6e120b..000000000
--- a/vendor/github.com/mistifyio/go-zfs/Vagrantfile
+++ /dev/null
@@ -1,34 +0,0 @@
-
-VAGRANTFILE_API_VERSION = "2"
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
- config.vm.box = "ubuntu/trusty64"
- config.ssh.forward_agent = true
-
- config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/mistifyio/go-zfs", create: true
-
- config.vm.provision "shell", inline: <<EOF
-cat << END > /etc/profile.d/go.sh
-export GOPATH=\\$HOME/go
-export PATH=\\$GOPATH/bin:/usr/local/go/bin:\\$PATH
-END
-
-chown -R vagrant /home/vagrant/go
-
-apt-get update
-apt-get install -y software-properties-common curl
-apt-add-repository --yes ppa:zfs-native/stable
-apt-get update
-apt-get install -y ubuntu-zfs
-
-cd /home/vagrant
-curl -z go1.3.3.linux-amd64.tar.gz -L -O https://storage.googleapis.com/golang/go1.3.3.linux-amd64.tar.gz
-tar -C /usr/local -zxf /home/vagrant/go1.3.3.linux-amd64.tar.gz
-
-cat << END > /etc/sudoers.d/go
-Defaults env_keep += "GOPATH"
-END
-
-EOF
-
-end
diff --git a/vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go b/vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go
deleted file mode 100644
index a46f73060..000000000
--- a/vendor/github.com/mistifyio/go-zfs/utils_notsolaris.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build !solaris
-
-package zfs
-
-import (
- "strings"
-)
-
-// List of ZFS properties to retrieve from zfs list command on a non-Solaris platform
-var dsPropList = []string{"name", "origin", "used", "available", "mountpoint", "compression", "type", "volsize", "quota", "referenced", "written", "logicalused", "usedbydataset"}
-
-var dsPropListOptions = strings.Join(dsPropList, ",")
-
-// List of Zpool properties to retrieve from zpool list command on a non-Solaris platform
-var zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio", "fragmentation", "freeing", "leaked"}
-var zpoolPropListOptions = strings.Join(zpoolPropList, ",")
-var zpoolArgs = []string{"get", "-p", zpoolPropListOptions}
diff --git a/vendor/github.com/mistifyio/go-zfs/utils_solaris.go b/vendor/github.com/mistifyio/go-zfs/utils_solaris.go
deleted file mode 100644
index 0a7e90f22..000000000
--- a/vendor/github.com/mistifyio/go-zfs/utils_solaris.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build solaris
-
-package zfs
-
-import (
- "strings"
-)
-
-// List of ZFS properties to retrieve from zfs list command on a Solaris platform
-var dsPropList = []string{"name", "origin", "used", "available", "mountpoint", "compression", "type", "volsize", "quota", "referenced"}
-
-var dsPropListOptions = strings.Join(dsPropList, ",")
-
-// List of Zpool properties to retrieve from zpool list command on a non-Solaris platform
-var zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio"}
-var zpoolPropListOptions = strings.Join(zpoolPropList, ",")
-var zpoolArgs = []string{"get", "-p", zpoolPropListOptions}
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.envrc b/vendor/github.com/mistifyio/go-zfs/v3/.envrc
new file mode 100644
index 000000000..f310aea66
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/.envrc
@@ -0,0 +1,4 @@
+has nix && use nix
+dotenv_if_exists
+PATH_add bin
+path_add GOBIN bin
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.gitignore b/vendor/github.com/mistifyio/go-zfs/v3/.gitignore
new file mode 100644
index 000000000..0867490ad
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/.gitignore
@@ -0,0 +1,6 @@
+bin
+go-zfs.test
+.vagrant
+
+# added by lint-install
+out/
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.golangci.yml b/vendor/github.com/mistifyio/go-zfs/v3/.golangci.yml
new file mode 100644
index 000000000..499c3eca1
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/.golangci.yml
@@ -0,0 +1,207 @@
+run:
+ # The default runtime timeout is 1m, which doesn't work well on Github Actions.
+ timeout: 4m
+
+# NOTE: This file is populated by the lint-install tool. Local adjustments may be overwritten.
+linters-settings:
+ cyclop:
+ # NOTE: This is a very high transitional threshold
+ max-complexity: 37
+ package-average: 34.0
+ skip-tests: true
+
+ gocognit:
+ # NOTE: This is a very high transitional threshold
+ min-complexity: 98
+
+ dupl:
+ threshold: 200
+
+ goconst:
+ min-len: 4
+ min-occurrences: 5
+ ignore-tests: true
+
+ gosec:
+ excludes:
+ - G107 # Potential HTTP request made with variable url
+ - G204 # Subprocess launched with function call as argument or cmd arguments
+      - G404 # Use of weak random number generator (math/rand instead of crypto/rand)
+
+ errorlint:
+ # these are still common in Go: for instance, exit errors.
+ asserts: false
+
+ exhaustive:
+ default-signifies-exhaustive: true
+
+ nestif:
+ min-complexity: 8
+
+ nolintlint:
+ require-explanation: true
+ allow-unused: false
+ require-specific: true
+
+ revive:
+ ignore-generated-header: true
+ severity: warning
+ rules:
+ - name: atomic
+ - name: blank-imports
+ - name: bool-literal-in-expr
+ - name: confusing-naming
+ - name: constant-logical-expr
+ - name: context-as-argument
+ - name: context-keys-type
+ - name: deep-exit
+ - name: defer
+ - name: range-val-in-closure
+ - name: range-val-address
+ - name: dot-imports
+ - name: error-naming
+ - name: error-return
+ - name: error-strings
+ - name: errorf
+ - name: exported
+ - name: identical-branches
+ - name: if-return
+ - name: import-shadowing
+ - name: increment-decrement
+ - name: indent-error-flow
+ - name: indent-error-flow
+ - name: package-comments
+ - name: range
+ - name: receiver-naming
+ - name: redefines-builtin-id
+ - name: superfluous-else
+ - name: struct-tag
+ - name: time-naming
+ - name: unexported-naming
+ - name: unexported-return
+ - name: unnecessary-stmt
+ - name: unreachable-code
+ - name: unused-parameter
+ - name: var-declaration
+ - name: var-naming
+ - name: unconditional-recursion
+ - name: waitgroup-by-value
+
+ staticcheck:
+ go: "1.16"
+
+ unused:
+ go: "1.16"
+
+output:
+ sort-results: true
+
+linters:
+ disable-all: true
+ enable:
+ - asciicheck
+ - bodyclose
+ - cyclop
+ - deadcode
+ - dogsled
+ - dupl
+ - durationcheck
+ - errcheck
+ - errname
+ - errorlint
+ - exhaustive
+ - exportloopref
+ - forcetypeassert
+ - gocognit
+ - goconst
+ - gocritic
+ - godot
+ - gofmt
+ - gofumpt
+ - gosec
+ - goheader
+ - goimports
+ - goprintffuncname
+ - gosimple
+ - govet
+ - ifshort
+ - importas
+ - ineffassign
+ - makezero
+ - misspell
+ - nakedret
+ - nestif
+ - nilerr
+ - noctx
+ - nolintlint
+ - predeclared
+ # disabling for the initial iteration of the linting tool
+ # - promlinter
+ - revive
+ - rowserrcheck
+ - sqlclosecheck
+ - staticcheck
+ - structcheck
+ - stylecheck
+ - thelper
+ - tparallel
+ - typecheck
+ - unconvert
+ - unparam
+ - unused
+ - varcheck
+ - wastedassign
+ - whitespace
+
+ # Disabled linters, due to being misaligned with Go practices
+ # - exhaustivestruct
+ # - gochecknoglobals
+ # - gochecknoinits
+ # - goconst
+ # - godox
+ # - goerr113
+ # - gomnd
+ # - lll
+ # - nlreturn
+ # - testpackage
+ # - wsl
+ # Disabled linters, due to not being relevant to our code base:
+ # - maligned
+ # - prealloc "For most programs usage of prealloc will be a premature optimization."
+ # Disabled linters due to bad error messages or bugs
+ # - tagliatelle
+
+issues:
+ # Excluding configuration per-path, per-linter, per-text and per-source
+ exclude-rules:
+ - path: _test\.go
+ linters:
+ - dupl
+ - errcheck
+ - forcetypeassert
+ - gocyclo
+ - gosec
+ - noctx
+
+ - path: .*cmd.*
+ linters:
+ - noctx
+
+ - path: main\.go
+ linters:
+ - noctx
+
+ - path: .*cmd.*
+ text: "deep-exit"
+
+ - path: main\.go
+ text: "deep-exit"
+
+ # This check is of questionable value
+ - linters:
+ - tparallel
+ text: "call t.Parallel on the top level as well as its subtests"
+
+ # Don't hide lint issues just because there are many of them
+ max-same-issues: 0
+ max-issues-per-linter: 0
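
The `nolintlint` settings above (require-specific, require-explanation, allow-unused: false) dictate how in-code suppressions must be written. A minimal illustrative Go sketch, not taken from this repository, of a directive that satisfies those checks:

```go
// Illustrative only: the linter is named explicitly and an explanation follows,
// which is what the nolintlint configuration above requires.
package example

import "os"

// closeQuietly performs a best-effort close; errcheck would normally flag the
// unchecked error, so the waiver names the linter and states the reason.
func closeQuietly(f *os.File) {
	f.Close() //nolint:errcheck // best-effort close during cleanup; the error is not actionable
}
```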
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.yamllint b/vendor/github.com/mistifyio/go-zfs/v3/.yamllint
new file mode 100644
index 000000000..9a08ad176
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/.yamllint
@@ -0,0 +1,16 @@
+---
+extends: default
+
+rules:
+ braces:
+ max-spaces-inside: 1
+ brackets:
+ max-spaces-inside: 1
+ comments: disable
+ comments-indentation: disable
+ document-start: disable
+ line-length:
+ level: warning
+ max: 160
+ allow-non-breakable-inline-mappings: true
+ truthy: disable
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/CHANGELOG.md b/vendor/github.com/mistifyio/go-zfs/v3/CHANGELOG.md
new file mode 100644
index 000000000..349245d03
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/CHANGELOG.md
@@ -0,0 +1,250 @@
+# Change Log
+
+All notable changes to this project will be documented in this file.
+This project adheres to [Semantic Versioning](http://semver.org/).
+This change log follows the advice of [Keep a CHANGELOG](https://github.com/olivierlacan/keep-a-changelog).
+
+## [Unreleased]
+
+## [3.0.0] - 2022-03-30
+
+### Added
+
+- Rename, Mount and Unmount methods
+- Parse more fields into Zpool type:
+ - dedupratio
+ - fragmentation
+ - freeing
+ - leaked
+ - readonly
+- Parse more fields into Dataset type:
+ - referenced
+- Incremental Send
+- Parse numbers in exact format
+- Support for Solaris (non-blocking, best-effort status)
+- Debug logging for command invocation
+- Use GitHub Actions for CI
+- Nix shell for dev env reproducibility
+- Direnv file for ease of dev
+- Formatting/lint checks (enforced by CI)
+- Go Module
+- FreeBSD based vagrant machine
+
+### Changed
+
+- Temporarily adjust TestDiff expected strings depending on ZFS version
+- Use one `zfs list`/`zpool list` call instead of many `zfs get`/`zpool get`
+- ZFS docs links now point to OpenZFS pages
+- Ubuntu vagrant box changed to generic/ubuntu2004
+
+### Fixed
+
+- `GetProperty` returning `VALUE` instead of the actual value
+
+### Shortlog
+
+ Amit Krishnan (1):
+ Issue #39 and Issue #40 - Enable Solaris support for go-zfs Switch from zfs/zpool get to zfs/zpool list for better performance Signed-off-by: Amit Krishnan <krish.amit@gmail.com>
+
+ Anand Patil (3):
+ Added Rename
+ Small fix to rename.
+ Added mount and umount methods
+
+ Brian Akins (1):
+ Add 'referenced' to zfs properties
+
+ Brian Bickerton (3):
+ Add debug logging before and after running external zfs command
+ Don't export the default no-op logger
+ Update uuid package repo url
+
+ Dmitry Teselkin (1):
+ Issue #52 - fix parseLine for fragmentation field
+
+ Edward Betts (1):
+ correct spelling mistake
+
+ Justin Cormack (1):
+ Switch to google/uuid which is the maintained version of pborman/uuid
+
+ Manuel Mendez (40):
+ rename Umount -> Unmount to follow zfs command name
+ add missing Unmount/Mount docs
+ always allocate largest Mount slice
+ add travis config
+ travis: update to go 1.7
+ travis: get go deps first
+ test: add nok helper to verify an error occurred
+ test: add test for Dataset.GetProperty
+ ci: swap #cerana on freenode for slack
+ ci: install new deps for 0.7 relases
+ ci: bump zol versions
+ ci: bump go versions
+ ci: use better gometalinter invocations
+ ci: add ccache
+ ci: set env earlier in before_install
+ fix test nok error printing
+ test: restructure TestDiff to deal with different order of changes
+ test: better unicode path handling in TestDiff
+ travis: bump zfs and go versions
+ cache zfs artifacts
+ Add nix-shell and direnv goodness
+ prettierify all the files
+ Add go based tools
+ Add Makefile and rules.mk files
+ gofumptize the code base
+ Use tinkerbell/lint-install to setup linters
+ make golangci-lint happy
+ Update CONTRIBUTING.md with make based approach
+ Add GitHub Actions
+ Drop Travis CI
+ One sentence per line
+ Update documentation links to openzfs-docs pages
+ Format Vagrantfile using rufo
+ Add go-zfs.test to .gitignore
+ test: Avoid reptitive/duplicate error logging and quitting
+ test: Use t.Logf instead of fmt.Printf
+ test: Better cleanup and error handling in zpoolTest
+ test: Do not mark TestDatasets as a t.Helper.
+ test: Change zpoolTest to a pure helper that returns a clean up function
+ test: Move helpers to a different file
+ vagrant: Add set -euxo pipefail to provision script
+ vagrant: Update to generic/ubuntu2004
+ vagrant: Minor fixes to Vagrantfile
+ vagrant: Update to go 1.17.8
+ vagrant: Run go tests as part of provision script
+ vagrant: Indent heredoc script
+ vagrant: Add freebsd machine
+
+ Matt Layher (1):
+ Parse more fields into Zpool type
+
+ Michael Crosby (1):
+ Add incremental send
+
+ Rikard Gynnerstedt (1):
+ remove command name from joined args
+
+ Sebastiaan van Stijn (1):
+ Add go.mod and rename to github.com/mistifyio/go-zfs/v3 (v3.0.0)
+
+ mikudeko (1):
+ Fix GetProperty always returning 'VALUE'
+
+## [2.1.1] - 2015-05-29
+
+### Fixed
+
+- Ignoring first pool listed
+- Incorrect `zfs get` argument ordering
+
+### Shortlog
+
+ Alexey Guskov (1):
+ zfs command uses different order of arguments on freebsd
+
+ Brian Akins (4):
+ test that ListZpools returns expected zpool
+ test error first
+ test error first
+ fix test to check correct return value
+
+ James Cunningham (1):
+ Fix Truncating First Zpool
+
+ Pat Norton (2):
+ Added Use of Go Tools
+ Update CONTRIBUTING.md
+
+## [2.1.0] - 2014-12-08
+
+### Added
+
+- Parse hardlink modification count returned from `zfs diff`
+
+### Fixed
+
+- Continuing instead of erroring when rolling back a non-snapshot
+
+### Shortlog
+
+ Brian Akins (2):
+ need to return the error here
+ use named struct fields
+
+ Jörg Thalheim (1):
+ zfs diff handle hardlinks modification now
+
+## [2.0.0] - 2014-12-02
+
+### Added
+
+- Flags for Destroy:
+ - DESTROY_DEFAULT
+ - DESTROY_DEFER_DELETION (`zfs destroy ... -d`)
+ - DESTROY_FORCE (`zfs destroy ... -f`)
+ - DESTROY_RECURSIVE_CLONES (`zfs destroy ... -R`)
+ - DESTROY_RECURSIVE (`zfs destroy ... -r`)
+ - etc
+- Diff method (`zfs diff`)
+- LogicalUsed and Origin properties to Dataset
+- Type constants for Dataset
+- State constants for Zpool
+- Logger interface
+- Improve documentation
+
+### Shortlog
+
+ Brian Akins (8):
+ remove reflection
+ style change for switches
+ need to check for error
+ keep in scope
+ go 1.3.3
+ golint cleanup
+ Just test if logical used is greater than 0, as this appears to be implementation specific
+ add docs to satisfy golint
+
+ Jörg Thalheim (8):
+ Add deferred flag to zfs.Destroy()
+ add Logicalused property
+ Add Origin property
+ gofmt
+ Add zfs.Diff
+ Add Logger
+ add recursive destroy with clones
+ use CamelCase-style constants
+
+ Matt Layher (4):
+ Improve documentation, document common ZFS operations, provide more references
+ Add zpool state constants, for easier health checking
+ Add dataset type constants, for easier type checking
+ Fix string split in command.Run(), use strings.Fields() instead of strings.Split()
+
+## [1.0.0] - 2014-11-12
+
+### Shortlog
+
+ Brian Akins (7):
+ add godoc badge
+ Add example
+ add information about zpool to struct and parser
+ Add Quota
+ add Children call
+ add Children call
+ fix snapshot tests
+
+ Brian Bickerton (3):
+ MIST-150 Change Snapshot second paramater from properties map[string][string] to recursive bool
+ MIST-150 Add Rollback method and related tests
+ MIST-160 Add SendSnapshot streaming method and tests
+
+ Matt Layher (1):
+ Add Error struct type and tests, enabling easier error return checking
+
+[3.0.0]: https://github.com/mistifyio/go-zfs/compare/v2.1.1...v3.0.0
+[2.1.1]: https://github.com/mistifyio/go-zfs/compare/v2.1.0...v2.1.1
+[2.1.0]: https://github.com/mistifyio/go-zfs/compare/v2.0.0...v2.1.0
+[2.0.0]: https://github.com/mistifyio/go-zfs/compare/v1.0.0...v2.0.0
+[1.0.0]: https://github.com/mistifyio/go-zfs/compare/v0.0.0...v1.0.0
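
The 3.0.0 entry above includes the rename to `github.com/mistifyio/go-zfs/v3`, so consumers must carry the `/v3` suffix in their import paths. A minimal consumer sketch, assuming a working `zfs` binary and sufficient privileges; the printed fields are illustrative:

```go
package main

import (
	"fmt"
	"log"

	zfs "github.com/mistifyio/go-zfs/v3" // note the /v3 module path after the rename
)

func main() {
	datasets, err := zfs.Datasets("") // an empty filter selects all datasets
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range datasets {
		fmt.Println(d.Name, d.Type)
	}
}
```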
diff --git a/vendor/github.com/mistifyio/go-zfs/CONTRIBUTING.md b/vendor/github.com/mistifyio/go-zfs/v3/CONTRIBUTING.md
index f1880c19e..9f625d564 100644
--- a/vendor/github.com/mistifyio/go-zfs/CONTRIBUTING.md
+++ b/vendor/github.com/mistifyio/go-zfs/v3/CONTRIBUTING.md
@@ -1,20 +1,23 @@
-## How to Contribute ##
+## How to Contribute
-We always welcome contributions to help make `go-zfs` better. Please take a moment to read this document if you would like to contribute.
+We always welcome contributions to help make `go-zfs` better.
+Please take a moment to read this document if you would like to contribute.
-### Reporting issues ###
+### Reporting issues
We use [GitHub issues](https://github.com/mistifyio/go-zfs/issues) to track bug reports, feature requests, and pull requests.
If you find a bug:
-* Use the GitHub issue search to check whether the bug has already been reported.
-* If the issue has been fixed, try to reproduce the issue using the latest `master` branch of the repository.
-* If the issue still reproduces or has not yet been reported, try to isolate the problem before opening an issue, if possible. Also provide the steps taken to reproduce the bug.
+- Use the GitHub issue search to check whether the bug has already been reported.
+- If the issue has been fixed, try to reproduce the issue using the latest `master` branch of the repository.
+- If the issue still reproduces or has not yet been reported, try to isolate the problem before opening an issue, if possible. Also provide the steps taken to reproduce the bug.
-### Pull requests ###
+### Pull requests
-We welcome bug fixes, improvements, and new features. Before embarking on making significant changes, please open an issue and ask first so that you do not risk duplicating efforts or spending time working on something that may be out of scope. For minor items, just open a pull request.
+We welcome bug fixes, improvements, and new features.
+Before embarking on making significant changes, please open an issue and ask first so that you do not risk duplicating efforts or spending time working on something that may be out of scope.
+For minor items, just open a pull request.
[Fork the project](https://help.github.com/articles/fork-a-repo), clone your fork, and add the upstream to your remote:
@@ -28,11 +31,13 @@ If you need to pull new changes committed upstream:
$ git fetch upstream
$ git merge upstream/master
-Don't work directly on master as this makes it harder to merge later. Create a feature branch for your fix or new feature:
+Don't work directly on master as this makes it harder to merge later.
+Create a feature branch for your fix or new feature:
$ git checkout -b <feature-branch-name>
-Please try to commit your changes in logical chunks. Ideally, you should include the issue number in the commit message.
+Please try to commit your changes in logical chunks.
+Ideally, you should include the issue number in the commit message.
$ git commit -m "Issue #<issue-number> - <commit-message>"
@@ -40,21 +45,20 @@ Push your feature branch to your fork.
$ git push origin <feature-branch-name>
-[Open a Pull Request](https://help.github.com/articles/using-pull-requests) against the upstream master branch. Please give your pull request a clear title and description and note which issue(s) your pull request fixes.
+[Open a Pull Request](https://help.github.com/articles/using-pull-requests) against the upstream master branch.
+Please give your pull request a clear title and description and note which issue(s) your pull request fixes.
-* All Go code should be formatted using [gofmt](http://golang.org/cmd/gofmt/).
-* Every exported function should have [documentation](http://blog.golang.org/godoc-documenting-go-code) and corresponding [tests](http://golang.org/doc/code.html#Testing).
+- All linters should be happy (can be run with `make verify`).
+- Every exported function should have [documentation](http://blog.golang.org/godoc-documenting-go-code) and corresponding [tests](http://golang.org/doc/code.html#Testing).
**Important:** By submitting a patch, you agree to allow the project owners to license your work under the [Apache 2.0 License](./LICENSE).
-### Go Tools ###
-For consistency and to catch minor issues for all of go code, please run the following:
-* goimports
-* go vet
-* golint
-* errcheck
+### Go Tools
+
+For consistency and to catch minor issues for all Go code, please run `make verify`.
Many editors can execute the above on save.
-----
+---
+
Guidelines based on http://azkaban.github.io/contributing.html
diff --git a/vendor/github.com/mistifyio/go-zfs/LICENSE b/vendor/github.com/mistifyio/go-zfs/v3/LICENSE
index f4c265cfe..f4c265cfe 100644
--- a/vendor/github.com/mistifyio/go-zfs/LICENSE
+++ b/vendor/github.com/mistifyio/go-zfs/v3/LICENSE
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/Makefile b/vendor/github.com/mistifyio/go-zfs/v3/Makefile
new file mode 100644
index 000000000..1c5f55e8c
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/Makefile
@@ -0,0 +1,19 @@
+help: ## Print this help
+ @grep --no-filename -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sed 's/:.*## /·/' | sort | column -t -W 2 -s '·' -c $(shell tput cols)
+
+all: test ## Run tests
+
+-include rules.mk
+-include lint.mk
+
+test: ## Run tests
+ go test ./...
+
+verify: gofumpt prettier lint ## Verify code style, is lint free, freshness ...
+ git diff | (! grep .)
+
+fix: gofumpt-fix prettier-fix ## Fix code formatting errors
+
+tools: ${toolsBins} ## Build Go based build tools
+
+.PHONY: all help test tools verify
diff --git a/vendor/github.com/mistifyio/go-zfs/README.md b/vendor/github.com/mistifyio/go-zfs/v3/README.md
index fef80d727..c91183300 100644
--- a/vendor/github.com/mistifyio/go-zfs/README.md
+++ b/vendor/github.com/mistifyio/go-zfs/v3/README.md
@@ -1,12 +1,12 @@
-# Go Wrapper for ZFS #
+# Go Wrapper for ZFS
Simple wrappers for ZFS command line tools.
[![GoDoc](https://godoc.org/github.com/mistifyio/go-zfs?status.svg)](https://godoc.org/github.com/mistifyio/go-zfs)
-## Requirements ##
+## Requirements
-You need a working ZFS setup. To use on Ubuntu 14.04, setup ZFS:
+You need a working ZFS setup. To use on Ubuntu 14.04, set up ZFS:
sudo apt-get install python-software-properties
sudo apt-add-repository ppa:zfs-native/stable
@@ -17,13 +17,13 @@ Developed using Go 1.3, but currently there isn't anything 1.3 specific. Don't u
Generally you need root privileges to use anything zfs related.
-## Status ##
+## Status
This has only been tested on Ubuntu 14.04
In the future, we hope to work directly with libzfs.
-# Hacking #
+# Hacking
The tests have decent examples for most functions.
@@ -48,7 +48,6 @@ err := f.Destroy()
```
-# Contributing #
+# Contributing
See the [contributing guidelines](./CONTRIBUTING.md)
-
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/Vagrantfile b/vendor/github.com/mistifyio/go-zfs/v3/Vagrantfile
new file mode 100644
index 000000000..7d8d2decd
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/Vagrantfile
@@ -0,0 +1,33 @@
+GOVERSION = "1.17.8"
+
+Vagrant.configure("2") do |config|
+ config.vm.define "ubuntu" do |ubuntu|
+ ubuntu.vm.box = "generic/ubuntu2004"
+ end
+ config.vm.define "freebsd" do |freebsd|
+ freebsd.vm.box = "generic/freebsd13"
+ end
+ config.ssh.forward_agent = true
+ config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/mistifyio/go-zfs", create: true
+ config.vm.provision "shell", inline: <<-EOF
+ set -euxo pipefail
+
+ os=$(uname -s|tr '[A-Z]' '[a-z]')
+ case $os in
+ linux) apt-get update -y && apt-get install -y --no-install-recommends gcc libc-dev zfsutils-linux ;;
+ esac
+
+ cd /tmp
+ curl -fLO --retry-max-time 30 --retry 10 https://go.dev/dl/go#{GOVERSION}.$os-amd64.tar.gz
+ tar -C /usr/local -zxf go#{GOVERSION}.$os-amd64.tar.gz
+ ln -nsf /usr/local/go/bin/go /usr/local/bin/go
+ rm -rf go*.tar.gz
+
+ chown -R vagrant:vagrant /home/vagrant/go
+ cd /home/vagrant/go/src/github.com/mistifyio/go-zfs
+ go test -c
+ sudo ./go-zfs.test -test.v
+ CGO_ENABLED=0 go test -c
+ sudo ./go-zfs.test -test.v
+ EOF
+end
diff --git a/vendor/github.com/mistifyio/go-zfs/error.go b/vendor/github.com/mistifyio/go-zfs/v3/error.go
index 5408ccdb5..5408ccdb5 100644
--- a/vendor/github.com/mistifyio/go-zfs/error.go
+++ b/vendor/github.com/mistifyio/go-zfs/v3/error.go
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/lint.mk b/vendor/github.com/mistifyio/go-zfs/v3/lint.mk
new file mode 100644
index 000000000..a1e0a4fd3
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/lint.mk
@@ -0,0 +1,75 @@
+# BEGIN: lint-install -makefile lint.mk .
+# http://github.com/tinkerbell/lint-install
+
+.PHONY: lint
+lint: _lint
+
+LINT_ARCH := $(shell uname -m)
+LINT_OS := $(shell uname)
+LINT_OS_LOWER := $(shell echo $(LINT_OS) | tr '[:upper:]' '[:lower:]')
+LINT_ROOT := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
+
+# shellcheck and hadolint lack arm64 native binaries: rely on x86-64 emulation
+ifeq ($(LINT_OS),Darwin)
+ ifeq ($(LINT_ARCH),arm64)
+ LINT_ARCH=x86_64
+ endif
+endif
+
+LINTERS :=
+FIXERS :=
+
+SHELLCHECK_VERSION ?= v0.8.0
+SHELLCHECK_BIN := out/linters/shellcheck-$(SHELLCHECK_VERSION)-$(LINT_ARCH)
+$(SHELLCHECK_BIN):
+ mkdir -p out/linters
+ rm -rf out/linters/shellcheck-*
+ curl -sSfL https://github.com/koalaman/shellcheck/releases/download/$(SHELLCHECK_VERSION)/shellcheck-$(SHELLCHECK_VERSION).$(LINT_OS_LOWER).$(LINT_ARCH).tar.xz | tar -C out/linters -xJf -
+ mv out/linters/shellcheck-$(SHELLCHECK_VERSION)/shellcheck $@
+ rm -rf out/linters/shellcheck-$(SHELLCHECK_VERSION)/shellcheck
+
+LINTERS += shellcheck-lint
+shellcheck-lint: $(SHELLCHECK_BIN)
+ $(SHELLCHECK_BIN) $(shell find . -name "*.sh")
+
+FIXERS += shellcheck-fix
+shellcheck-fix: $(SHELLCHECK_BIN)
+ $(SHELLCHECK_BIN) $(shell find . -name "*.sh") -f diff | { read -t 1 line || exit 0; { echo "$$line" && cat; } | git apply -p2; }
+
+GOLANGCI_LINT_CONFIG := $(LINT_ROOT)/.golangci.yml
+GOLANGCI_LINT_VERSION ?= v1.43.0
+GOLANGCI_LINT_BIN := out/linters/golangci-lint-$(GOLANGCI_LINT_VERSION)-$(LINT_ARCH)
+$(GOLANGCI_LINT_BIN):
+ mkdir -p out/linters
+ rm -rf out/linters/golangci-lint-*
+ curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b out/linters $(GOLANGCI_LINT_VERSION)
+ mv out/linters/golangci-lint $@
+
+LINTERS += golangci-lint-lint
+golangci-lint-lint: $(GOLANGCI_LINT_BIN)
+ find . -name go.mod -execdir "$(GOLANGCI_LINT_BIN)" run -c "$(GOLANGCI_LINT_CONFIG)" \;
+
+FIXERS += golangci-lint-fix
+golangci-lint-fix: $(GOLANGCI_LINT_BIN)
+ find . -name go.mod -execdir "$(GOLANGCI_LINT_BIN)" run -c "$(GOLANGCI_LINT_CONFIG)" --fix \;
+
+YAMLLINT_VERSION ?= 1.26.3
+YAMLLINT_ROOT := out/linters/yamllint-$(YAMLLINT_VERSION)
+YAMLLINT_BIN := $(YAMLLINT_ROOT)/dist/bin/yamllint
+$(YAMLLINT_BIN):
+ mkdir -p out/linters
+ rm -rf out/linters/yamllint-*
+ curl -sSfL https://github.com/adrienverge/yamllint/archive/refs/tags/v$(YAMLLINT_VERSION).tar.gz | tar -C out/linters -zxf -
+ cd $(YAMLLINT_ROOT) && pip3 install --target dist .
+
+LINTERS += yamllint-lint
+yamllint-lint: $(YAMLLINT_BIN)
+ PYTHONPATH=$(YAMLLINT_ROOT)/dist $(YAMLLINT_ROOT)/dist/bin/yamllint .
+
+.PHONY: _lint $(LINTERS)
+_lint: $(LINTERS)
+
+.PHONY: fix $(FIXERS)
+fix: $(FIXERS)
+
+# END: lint-install -makefile lint.mk .
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/rules.mk b/vendor/github.com/mistifyio/go-zfs/v3/rules.mk
new file mode 100644
index 000000000..4746c978a
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/rules.mk
@@ -0,0 +1,49 @@
+# Only use the recipes defined in these makefiles
+MAKEFLAGS += --no-builtin-rules
+.SUFFIXES:
+# Delete target files if there's an error
+# This avoids a failure to then skip building on next run if the output is created by shell redirection for example
+# Not really necessary for now, but just good to have already if it becomes necessary later.
+.DELETE_ON_ERROR:
+# Treat the whole recipe as a one shell script/invocation instead of one-per-line
+.ONESHELL:
+# Use bash instead of plain sh
+SHELL := bash
+.SHELLFLAGS := -o pipefail -euc
+
+version := $(shell git rev-parse --short HEAD)
+tag := $(shell git tag --points-at HEAD)
+ifneq (,$(tag))
+version := $(tag)-$(version)
+endif
+LDFLAGS := -ldflags "-X main.version=$(version)"
+export CGO_ENABLED := 0
+
+ifeq ($(origin GOBIN), undefined)
+GOBIN := ${PWD}/bin
+export GOBIN
+PATH := ${GOBIN}:${PATH}
+export PATH
+endif
+
+toolsBins := $(addprefix bin/,$(notdir $(shell grep '^\s*_' tooling/tools.go | awk -F'"' '{print $$2}')))
+
+# installs cli tools defined in tools.go
+$(toolsBins): tooling/go.mod tooling/go.sum tooling/tools.go
+$(toolsBins): CMD=$(shell awk -F'"' '/$(@F)"/ {print $$2}' tooling/tools.go)
+$(toolsBins):
+ cd tooling && go install $(CMD)
+
+.PHONY: gofumpt
+gofumpt: bin/gofumpt
+ gofumpt -s -d .
+
+gofumpt-fix: bin/gofumpt
+ gofumpt -s -w .
+
+.PHONY: prettier prettier-fix
+prettier:
+ prettier --list-different --ignore-path .gitignore .
+
+prettier-fix:
+ prettier --write --ignore-path .gitignore .
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/shell.nix b/vendor/github.com/mistifyio/go-zfs/v3/shell.nix
new file mode 100644
index 000000000..e0ea24c16
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/shell.nix
@@ -0,0 +1,26 @@
+let _pkgs = import <nixpkgs> { };
+in { pkgs ? import (_pkgs.fetchFromGitHub {
+ owner = "NixOS";
+ repo = "nixpkgs";
+ #branch@date: 21.11@2022-02-13
+ rev = "560ad8a2f89586ab1a14290f128ad6a393046065";
+ sha256 = "0s0dv1clfpjyzy4p6ywxvzmwx9ddbr2yl77jf1wqdbr0x1206hb8";
+}) { } }:
+
+with pkgs;
+
+mkShell {
+ buildInputs = [
+ git
+ gnumake
+ gnused
+ go
+ nixfmt
+ nodePackages.prettier
+ python3Packages.pip
+ python3Packages.setuptools
+ rufo
+ shfmt
+ vagrant
+ ];
+}
diff --git a/vendor/github.com/mistifyio/go-zfs/utils.go b/vendor/github.com/mistifyio/go-zfs/v3/utils.go
index c18c2c3da..0c2cce7d9 100644
--- a/vendor/github.com/mistifyio/go-zfs/utils.go
+++ b/vendor/github.com/mistifyio/go-zfs/v3/utils.go
@@ -21,7 +21,6 @@ type command struct {
}
func (c *command) Run(arg ...string) ([][]string, error) {
-
cmd := exec.Command(c.Command, arg...)
var stdout, stderr bytes.Buffer
@@ -34,7 +33,6 @@ func (c *command) Run(arg ...string) ([][]string, error) {
if c.Stdin != nil {
cmd.Stdin = c.Stdin
-
}
cmd.Stderr = &stderr
@@ -42,16 +40,14 @@ func (c *command) Run(arg ...string) ([][]string, error) {
joinedArgs := strings.Join(cmd.Args, " ")
logger.Log([]string{"ID:" + id, "START", joinedArgs})
- err := cmd.Run()
- logger.Log([]string{"ID:" + id, "FINISH"})
-
- if err != nil {
+ if err := cmd.Run(); err != nil {
return nil, &Error{
Err: err,
Debug: strings.Join([]string{cmd.Path, joinedArgs[1:]}, " "),
Stderr: stderr.String(),
}
}
+ logger.Log([]string{"ID:" + id, "FINISH"})
// assume if you passed in something for stdout, that you know what to do with it
if c.Stdout != nil {
@@ -60,7 +56,7 @@ func (c *command) Run(arg ...string) ([][]string, error) {
lines := strings.Split(stdout.String(), "\n")
- //last line is always blank
+ // last line is always blank
lines = lines[0 : len(lines)-1]
output := make([][]string, len(lines))
@@ -92,33 +88,33 @@ func setUint(field *uint64, value string) error {
return nil
}
-func (ds *Dataset) parseLine(line []string) error {
+func (d *Dataset) parseLine(line []string) error {
var err error
if len(line) != len(dsPropList) {
- return errors.New("Output does not match what is expected on this platform")
+ return errors.New("output does not match what is expected on this platform")
}
- setString(&ds.Name, line[0])
- setString(&ds.Origin, line[1])
+ setString(&d.Name, line[0])
+ setString(&d.Origin, line[1])
- if err = setUint(&ds.Used, line[2]); err != nil {
+ if err = setUint(&d.Used, line[2]); err != nil {
return err
}
- if err = setUint(&ds.Avail, line[3]); err != nil {
+ if err = setUint(&d.Avail, line[3]); err != nil {
return err
}
- setString(&ds.Mountpoint, line[4])
- setString(&ds.Compression, line[5])
- setString(&ds.Type, line[6])
+ setString(&d.Mountpoint, line[4])
+ setString(&d.Compression, line[5])
+ setString(&d.Type, line[6])
- if err = setUint(&ds.Volsize, line[7]); err != nil {
+ if err = setUint(&d.Volsize, line[7]); err != nil {
return err
}
- if err = setUint(&ds.Quota, line[8]); err != nil {
+ if err = setUint(&d.Quota, line[8]); err != nil {
return err
}
- if err = setUint(&ds.Referenced, line[9]); err != nil {
+ if err = setUint(&d.Referenced, line[9]); err != nil {
return err
}
@@ -126,17 +122,13 @@ func (ds *Dataset) parseLine(line []string) error {
return nil
}
- if err = setUint(&ds.Written, line[10]); err != nil {
+ if err = setUint(&d.Written, line[10]); err != nil {
return err
}
- if err = setUint(&ds.Logicalused, line[11]); err != nil {
+ if err = setUint(&d.Logicalused, line[11]); err != nil {
return err
}
- if err = setUint(&ds.Usedbydataset, line[12]); err != nil {
- return err
- }
-
- return nil
+ return setUint(&d.Usedbydataset, line[12])
}
/*
@@ -156,12 +148,12 @@ func unescapeFilepath(path string) (string, error) {
for i := 0; i < llen; {
if path[i] == '\\' {
if llen < i+4 {
- return "", fmt.Errorf("Invalid octal code: too short")
+ return "", fmt.Errorf("invalid octal code: too short")
}
octalCode := path[(i + 1):(i + 4)]
val, err := strconv.ParseUint(octalCode, 8, 8)
if err != nil {
- return "", fmt.Errorf("Invalid octal code: %v", err)
+ return "", fmt.Errorf("invalid octal code: %w", err)
}
buf = append(buf, byte(val))
i += 4
@@ -179,6 +171,7 @@ var changeTypeMap = map[string]ChangeType{
"M": Modified,
"R": Renamed,
}
+
var inodeTypeMap = map[string]InodeType{
"B": BlockDevice,
"C": CharacterDevice,
@@ -191,51 +184,51 @@ var inodeTypeMap = map[string]InodeType{
"F": File,
}
-// matches (+1) or (-1)
-var referenceCountRegex = regexp.MustCompile("\\(([+-]\\d+?)\\)")
+// matches (+1) or (-1).
+var referenceCountRegex = regexp.MustCompile(`\(([+-]\d+?)\)`)
func parseReferenceCount(field string) (int, error) {
matches := referenceCountRegex.FindStringSubmatch(field)
if matches == nil {
- return 0, fmt.Errorf("Regexp does not match")
+ return 0, fmt.Errorf("regexp does not match")
}
return strconv.Atoi(matches[1])
}
func parseInodeChange(line []string) (*InodeChange, error) {
- llen := len(line)
+ llen := len(line) // nolint:ifshort // llen *is* actually used
if llen < 1 {
- return nil, fmt.Errorf("Empty line passed")
+ return nil, fmt.Errorf("empty line passed")
}
changeType := changeTypeMap[line[0]]
if changeType == 0 {
- return nil, fmt.Errorf("Unknown change type '%s'", line[0])
+ return nil, fmt.Errorf("unknown change type '%s'", line[0])
}
switch changeType {
case Renamed:
if llen != 4 {
- return nil, fmt.Errorf("Mismatching number of fields: expect 4, got: %d", llen)
+ return nil, fmt.Errorf("mismatching number of fields: expect 4, got: %d", llen)
}
case Modified:
if llen != 4 && llen != 3 {
- return nil, fmt.Errorf("Mismatching number of fields: expect 3..4, got: %d", llen)
+ return nil, fmt.Errorf("mismatching number of fields: expect 3..4, got: %d", llen)
}
default:
if llen != 3 {
- return nil, fmt.Errorf("Mismatching number of fields: expect 3, got: %d", llen)
+ return nil, fmt.Errorf("mismatching number of fields: expect 3, got: %d", llen)
}
}
inodeType := inodeTypeMap[line[1]]
if inodeType == 0 {
- return nil, fmt.Errorf("Unknown inode type '%s'", line[1])
+ return nil, fmt.Errorf("unknown inode type '%s'", line[1])
}
path, err := unescapeFilepath(line[2])
if err != nil {
- return nil, fmt.Errorf("Failed to parse filename: %v", err)
+ return nil, fmt.Errorf("failed to parse filename: %w", err)
}
var newPath string
@@ -244,13 +237,13 @@ func parseInodeChange(line []string) (*InodeChange, error) {
case Renamed:
newPath, err = unescapeFilepath(line[3])
if err != nil {
- return nil, fmt.Errorf("Failed to parse filename: %v", err)
+ return nil, fmt.Errorf("failed to parse filename: %w", err)
}
case Modified:
if llen == 4 {
referenceCount, err = parseReferenceCount(line[3])
if err != nil {
- return nil, fmt.Errorf("Failed to parse reference count: %v", err)
+ return nil, fmt.Errorf("failed to parse reference count: %w", err)
}
}
default:
@@ -266,18 +259,19 @@ func parseInodeChange(line []string) (*InodeChange, error) {
}, nil
}
-// example input
-//M / /testpool/bar/
-//+ F /testpool/bar/hello.txt
-//M / /testpool/bar/hello.txt (+1)
-//M / /testpool/bar/hello-hardlink
+// example input for parseInodeChanges
+// M / /testpool/bar/
+// + F /testpool/bar/hello.txt
+// M / /testpool/bar/hello.txt (+1)
+// M / /testpool/bar/hello-hardlink
+
func parseInodeChanges(lines [][]string) ([]*InodeChange, error) {
changes := make([]*InodeChange, len(lines))
for i, line := range lines {
c, err := parseInodeChange(line)
if err != nil {
- return nil, fmt.Errorf("Failed to parse line %d of zfs diff: %v, got: '%s'", i, err, line)
+ return nil, fmt.Errorf("failed to parse line %d of zfs diff: %w, got: '%s'", i, err, line)
}
changes[i] = c
}
@@ -290,7 +284,7 @@ func listByType(t, filter string) ([]*Dataset, error) {
if filter != "" {
args = append(args, filter)
}
- out, err := zfs(args...)
+ out, err := zfsOutput(args...)
if err != nil {
return nil, err
}
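
Several hunks above switch error formatting from `%v` to `%w`. The practical difference is that `%w` keeps the wrapped error matchable with `errors.Is` and `errors.As`; a small self-contained sketch (the function name and path are placeholders, not go-zfs code):

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// loadConfig wraps the underlying error with %w so callers can still inspect it.
func loadConfig(path string) error {
	if _, err := os.Stat(path); err != nil {
		return fmt.Errorf("failed to load config: %w", err)
	}
	return nil
}

func main() {
	err := loadConfig("/nonexistent/path")
	// Prints true because %w preserves the error chain; with %v this would print false.
	fmt.Println(errors.Is(err, os.ErrNotExist))
}
```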
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go b/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go
new file mode 100644
index 000000000..ef1beac90
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go
@@ -0,0 +1,19 @@
+//go:build !solaris
+// +build !solaris
+
+package zfs
+
+import "strings"
+
+var (
+ // List of ZFS properties to retrieve from zfs list command on a non-Solaris platform.
+ dsPropList = []string{"name", "origin", "used", "available", "mountpoint", "compression", "type", "volsize", "quota", "referenced", "written", "logicalused", "usedbydataset"}
+
+ dsPropListOptions = strings.Join(dsPropList, ",")
+
+ // List of Zpool properties to retrieve from zpool list command on a non-Solaris platform.
+ zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio", "fragmentation", "freeing", "leaked"}
+
+ zpoolPropListOptions = strings.Join(zpoolPropList, ",")
+ zpoolArgs = []string{"get", "-p", zpoolPropListOptions}
+)
diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go b/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go
new file mode 100644
index 000000000..c6bf6d87a
--- /dev/null
+++ b/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go
@@ -0,0 +1,19 @@
+//go:build solaris
+// +build solaris
+
+package zfs
+
+import "strings"
+
+var (
+ // List of ZFS properties to retrieve from zfs list command on a Solaris platform
+ dsPropList = []string{"name", "origin", "used", "available", "mountpoint", "compression", "type", "volsize", "quota", "referenced"}
+
+ dsPropListOptions = strings.Join(dsPropList, ",")
+
+ // List of Zpool properties to retrieve from zpool list command on a Solaris platform
+ zpoolPropList = []string{"name", "health", "allocated", "size", "free", "readonly", "dedupratio"}
+
+ zpoolPropListOptions = strings.Join(zpoolPropList, ",")
+ zpoolArgs = []string{"get", "-p", zpoolPropListOptions}
+)
diff --git a/vendor/github.com/mistifyio/go-zfs/zfs.go b/vendor/github.com/mistifyio/go-zfs/v3/zfs.go
index 4e5087ffe..1166bdc21 100644
--- a/vendor/github.com/mistifyio/go-zfs/zfs.go
+++ b/vendor/github.com/mistifyio/go-zfs/v3/zfs.go
@@ -9,19 +9,18 @@ import (
"strings"
)
-// ZFS dataset types, which can indicate if a dataset is a filesystem,
-// snapshot, or volume.
+// ZFS dataset types, which can indicate if a dataset is a filesystem, snapshot, or volume.
const (
DatasetFilesystem = "filesystem"
DatasetSnapshot = "snapshot"
DatasetVolume = "volume"
)
-// Dataset is a ZFS dataset. A dataset could be a clone, filesystem, snapshot,
-// or volume. The Type struct member can be used to determine a dataset's type.
+// Dataset is a ZFS dataset. A dataset could be a clone, filesystem, snapshot, or volume.
+// The Type struct member can be used to determine a dataset's type.
//
// The field definitions can be found in the ZFS manual:
-// http://www.freebsd.org/cgi/man.cgi?zfs(8).
+// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html.
type Dataset struct {
Name string
Origin string
@@ -38,10 +37,10 @@ type Dataset struct {
Referenced uint64
}
-// InodeType is the type of inode as reported by Diff
+// InodeType is the type of inode as reported by Diff.
type InodeType int
-// Types of Inodes
+// Types of Inodes.
const (
_ = iota // 0 == unknown type
BlockDevice InodeType = iota
@@ -55,10 +54,10 @@ const (
File
)
-// ChangeType is the type of inode change as reported by Diff
+// ChangeType is the type of inode change as reported by Diff.
type ChangeType int
-// Types of Changes
+// Types of Changes.
const (
_ = iota // 0 == unknown type
Removed ChangeType = iota
@@ -67,10 +66,10 @@ const (
Renamed
)
-// DestroyFlag is the options flag passed to Destroy
+// DestroyFlag is the options flag passed to Destroy.
type DestroyFlag int
-// Valid destroy options
+// Valid destroy options.
const (
DestroyDefault DestroyFlag = 1 << iota
DestroyRecursive = 1 << iota
@@ -79,7 +78,7 @@ const (
DestroyForceUmount = 1 << iota
)
-// InodeChange represents a change as reported by Diff
+// InodeChange represents a change as reported by Diff.
type InodeChange struct {
Change ChangeType
Type InodeType
@@ -88,65 +87,65 @@ type InodeChange struct {
ReferenceCountChange int
}
-// Logger can be used to log commands/actions
+// Logger can be used to log commands/actions.
type Logger interface {
Log(cmd []string)
}
type defaultLogger struct{}
-func (*defaultLogger) Log(cmd []string) {
- return
+func (*defaultLogger) Log([]string) {
}
var logger Logger = &defaultLogger{}
-// SetLogger set a log handler to log all commands including arguments before
-// they are executed
+// SetLogger set a log handler to log all commands including arguments before they are executed.
func SetLogger(l Logger) {
if l != nil {
logger = l
}
}
+// zfs is a helper function to wrap typical calls to zfs that ignores stdout.
+func zfs(arg ...string) error {
+ _, err := zfsOutput(arg...)
+ return err
+}
+
// zfs is a helper function to wrap typical calls to zfs.
-func zfs(arg ...string) ([][]string, error) {
+func zfsOutput(arg ...string) ([][]string, error) {
c := command{Command: "zfs"}
return c.Run(arg...)
}
// Datasets returns a slice of ZFS datasets, regardless of type.
-// A filter argument may be passed to select a dataset with the matching name,
-// or empty string ("") may be used to select all datasets.
+// A filter argument may be passed to select a dataset with the matching name, or empty string ("") may be used to select all datasets.
func Datasets(filter string) ([]*Dataset, error) {
return listByType("all", filter)
}
// Snapshots returns a slice of ZFS snapshots.
-// A filter argument may be passed to select a snapshot with the matching name,
-// or empty string ("") may be used to select all snapshots.
+// A filter argument may be passed to select a snapshot with the matching name, or empty string ("") may be used to select all snapshots.
func Snapshots(filter string) ([]*Dataset, error) {
return listByType(DatasetSnapshot, filter)
}
// Filesystems returns a slice of ZFS filesystems.
-// A filter argument may be passed to select a filesystem with the matching name,
-// or empty string ("") may be used to select all filesystems.
+// A filter argument may be passed to select a filesystem with the matching name, or empty string ("") may be used to select all filesystems.
func Filesystems(filter string) ([]*Dataset, error) {
return listByType(DatasetFilesystem, filter)
}
// Volumes returns a slice of ZFS volumes.
-// A filter argument may be passed to select a volume with the matching name,
-// or empty string ("") may be used to select all volumes.
+// A filter argument may be passed to select a volume with the matching name, or empty string ("") may be used to select all volumes.
func Volumes(filter string) ([]*Dataset, error) {
return listByType(DatasetVolume, filter)
}
-// GetDataset retrieves a single ZFS dataset by name. This dataset could be
-// any valid ZFS dataset type, such as a clone, filesystem, snapshot, or volume.
+// GetDataset retrieves a single ZFS dataset by name.
+// This dataset could be any valid ZFS dataset type, such as a clone, filesystem, snapshot, or volume.
func GetDataset(name string) (*Dataset, error) {
- out, err := zfs("list", "-Hp", "-o", dsPropListOptions, name)
+ out, err := zfsOutput("list", "-Hp", "-o", dsPropListOptions, name)
if err != nil {
return nil, err
}
@@ -174,8 +173,7 @@ func (d *Dataset) Clone(dest string, properties map[string]string) (*Dataset, er
args = append(args, propsSlice(properties)...)
}
args = append(args, []string{d.Name, dest}...)
- _, err := zfs(args...)
- if err != nil {
+ if err := zfs(args...); err != nil {
return nil, err
}
return GetDataset(dest)
@@ -192,8 +190,7 @@ func (d *Dataset) Unmount(force bool) (*Dataset, error) {
args = append(args, "-f")
}
args = append(args, d.Name)
- _, err := zfs(args...)
- if err != nil {
+ if err := zfs(args...); err != nil {
return nil, err
}
return GetDataset(d.Name)
@@ -214,20 +211,17 @@ func (d *Dataset) Mount(overlay bool, options []string) (*Dataset, error) {
args = append(args, strings.Join(options, ","))
}
args = append(args, d.Name)
- _, err := zfs(args...)
- if err != nil {
+ if err := zfs(args...); err != nil {
return nil, err
}
return GetDataset(d.Name)
}
-// ReceiveSnapshot receives a ZFS stream from the input io.Reader, creates a
-// new snapshot with the specified name, and streams the input data into the
-// newly-created snapshot.
+// ReceiveSnapshot receives a ZFS stream from the input io.Reader.
+// A new snapshot is created with the specified name, and streams the input data into the newly-created snapshot.
func ReceiveSnapshot(input io.Reader, name string) (*Dataset, error) {
c := command{Command: "zfs", Stdin: input}
- _, err := c.Run("receive", name)
- if err != nil {
+ if _, err := c.Run("receive", name); err != nil {
return nil, err
}
return GetDataset(name)
@@ -245,10 +239,21 @@ func (d *Dataset) SendSnapshot(output io.Writer) error {
return err
}
-// CreateVolume creates a new ZFS volume with the specified name, size, and
-// properties.
-// A full list of available ZFS properties may be found here:
-// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+// IncrementalSend sends a ZFS stream of a snapshot to the input io.Writer using the baseSnapshot as the starting point.
+// An error will be returned if the input dataset is not of snapshot type.
+func (d *Dataset) IncrementalSend(baseSnapshot *Dataset, output io.Writer) error {
+ if d.Type != DatasetSnapshot || baseSnapshot.Type != DatasetSnapshot {
+ return errors.New("can only send snapshots")
+ }
+ c := command{Command: "zfs", Stdout: output}
+ _, err := c.Run("send", "-i", baseSnapshot.Name, d.Name)
+ return err
+}
+
+// CreateVolume creates a new ZFS volume with the specified name, size, and properties.
+//
+// A full list of available ZFS properties may be found in the ZFS manual:
+// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html.
func CreateVolume(name string, size uint64, properties map[string]string) (*Dataset, error) {
args := make([]string, 4, 5)
args[0] = "create"
@@ -259,17 +264,15 @@ func CreateVolume(name string, size uint64, properties map[string]string) (*Data
args = append(args, propsSlice(properties)...)
}
args = append(args, name)
- _, err := zfs(args...)
- if err != nil {
+ if err := zfs(args...); err != nil {
return nil, err
}
return GetDataset(name)
}
-// Destroy destroys a ZFS dataset. If the destroy bit flag is set, any
-// descendents of the dataset will be recursively destroyed, including snapshots.
-// If the deferred bit flag is set, the snapshot is marked for deferred
-// deletion.
+// Destroy destroys a ZFS dataset.
+// If the destroy bit flag is set, any descendents of the dataset will be recursively destroyed, including snapshots.
+// If the deferred bit flag is set, the snapshot is marked for deferred deletion.
func (d *Dataset) Destroy(flags DestroyFlag) error {
args := make([]string, 1, 3)
args[0] = "destroy"
@@ -290,25 +293,26 @@ func (d *Dataset) Destroy(flags DestroyFlag) error {
}
args = append(args, d.Name)
- _, err := zfs(args...)
+ err := zfs(args...)
return err
}
// SetProperty sets a ZFS property on the receiving dataset.
-// A full list of available ZFS properties may be found here:
-// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+//
+// A full list of available ZFS properties may be found in the ZFS manual:
+// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html.
func (d *Dataset) SetProperty(key, val string) error {
prop := strings.Join([]string{key, val}, "=")
- _, err := zfs("set", prop, d.Name)
+ err := zfs("set", prop, d.Name)
return err
}
-// GetProperty returns the current value of a ZFS property from the
-// receiving dataset.
-// A full list of available ZFS properties may be found here:
-// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+// GetProperty returns the current value of a ZFS property from the receiving dataset.
+//
+// A full list of available ZFS properties may be found in the ZFS manual:
+// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html.
func (d *Dataset) GetProperty(key string) (string, error) {
- out, err := zfs("get", "-H", key, d.Name)
+ out, err := zfsOutput("get", "-H", key, d.Name)
if err != nil {
return "", err
}
@@ -317,7 +321,7 @@ func (d *Dataset) GetProperty(key string) (string, error) {
}
// Rename renames a dataset.
-func (d *Dataset) Rename(name string, createParent bool, recursiveRenameSnapshots bool) (*Dataset, error) {
+func (d *Dataset) Rename(name string, createParent, recursiveRenameSnapshots bool) (*Dataset, error) {
args := make([]string, 3, 5)
args[0] = "rename"
args[1] = d.Name
@@ -328,8 +332,7 @@ func (d *Dataset) Rename(name string, createParent bool, recursiveRenameSnapshot
if recursiveRenameSnapshots {
args = append(args, "-r")
}
- _, err := zfs(args...)
- if err != nil {
+ if err := zfs(args...); err != nil {
return d, err
}
@@ -341,10 +344,10 @@ func (d *Dataset) Snapshots() ([]*Dataset, error) {
return Snapshots(d.Name)
}
-// CreateFilesystem creates a new ZFS filesystem with the specified name and
-// properties.
-// A full list of available ZFS properties may be found here:
-// https://www.freebsd.org/cgi/man.cgi?zfs(8).
+// CreateFilesystem creates a new ZFS filesystem with the specified name and properties.
+//
+// A full list of available ZFS properties may be found in the ZFS manual:
+// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html.
func CreateFilesystem(name string, properties map[string]string) (*Dataset, error) {
args := make([]string, 1, 4)
args[0] = "create"
@@ -354,16 +357,14 @@ func CreateFilesystem(name string, properties map[string]string) (*Dataset, erro
}
args = append(args, name)
- _, err := zfs(args...)
- if err != nil {
+ if err := zfs(args...); err != nil {
return nil, err
}
return GetDataset(name)
}
-// Snapshot creates a new ZFS snapshot of the receiving dataset, using the
-// specified name. Optionally, the snapshot can be taken recursively, creating
-// snapshots of all descendent filesystems in a single, atomic operation.
+// Snapshot creates a new ZFS snapshot of the receiving dataset, using the specified name.
+// Optionally, the snapshot can be taken recursively, creating snapshots of all descendent filesystems in a single, atomic operation.
func (d *Dataset) Snapshot(name string, recursive bool) (*Dataset, error) {
args := make([]string, 1, 4)
args[0] = "snapshot"
@@ -372,17 +373,15 @@ func (d *Dataset) Snapshot(name string, recursive bool) (*Dataset, error) {
}
snapName := fmt.Sprintf("%s@%s", d.Name, name)
args = append(args, snapName)
- _, err := zfs(args...)
- if err != nil {
+ if err := zfs(args...); err != nil {
return nil, err
}
return GetDataset(snapName)
}
// Rollback rolls back the receiving ZFS dataset to a previous snapshot.
-// Optionally, intermediate snapshots can be destroyed. A ZFS snapshot
-// rollback cannot be completed without this option, if more recent
-// snapshots exist.
+// Optionally, intermediate snapshots can be destroyed.
+// A ZFS snapshot rollback cannot be completed without this option, if more recent snapshots exist.
// An error will be returned if the input dataset is not of snapshot type.
func (d *Dataset) Rollback(destroyMoreRecent bool) error {
if d.Type != DatasetSnapshot {
@@ -396,13 +395,12 @@ func (d *Dataset) Rollback(destroyMoreRecent bool) error {
}
args = append(args, d.Name)
- _, err := zfs(args...)
+ err := zfs(args...)
return err
}
// Children returns a slice of children of the receiving ZFS dataset.
-// A recursion depth may be specified, or a depth of 0 allows unlimited
-// recursion.
+// A recursion depth may be specified, or a depth of 0 allows unlimited recursion.
func (d *Dataset) Children(depth uint64) ([]*Dataset, error) {
args := []string{"list"}
if depth > 0 {
@@ -414,7 +412,7 @@ func (d *Dataset) Children(depth uint64) ([]*Dataset, error) {
args = append(args, "-t", "all", "-Hp", "-o", dsPropListOptions)
args = append(args, d.Name)
- out, err := zfs(args...)
+ out, err := zfsOutput(args...)
if err != nil {
return nil, err
}
@@ -436,11 +434,10 @@ func (d *Dataset) Children(depth uint64) ([]*Dataset, error) {
}
// Diff returns changes between a snapshot and the given ZFS dataset.
-// The snapshot name must include the filesystem part as it is possible to
-// compare clones with their origin snapshots.
+// The snapshot name must include the filesystem part as it is possible to compare clones with their origin snapshots.
func (d *Dataset) Diff(snapshot string) ([]*InodeChange, error) {
- args := []string{"diff", "-FH", snapshot, d.Name}[:]
- out, err := zfs(args...)
+ args := []string{"diff", "-FH", snapshot, d.Name}
+ out, err := zfsOutput(args...)
if err != nil {
return nil, err
}
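
The new `IncrementalSend` method above streams only the delta between a base snapshot and a later one. A hedged usage sketch; the dataset and snapshot names are placeholders, and root privileges plus a real `zfs` binary are assumed:

```go
package main

import (
	"log"
	"os"

	zfs "github.com/mistifyio/go-zfs/v3"
)

func main() {
	base, err := zfs.GetDataset("tank/data@base")
	if err != nil {
		log.Fatal(err)
	}
	latest, err := zfs.GetDataset("tank/data@latest")
	if err != nil {
		log.Fatal(err)
	}

	out, err := os.Create("incremental.zfs")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// Both arguments must be snapshots; only the changes since base are written.
	if err := latest.IncrementalSend(base, out); err != nil {
		log.Fatal(err)
	}
}
```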
diff --git a/vendor/github.com/mistifyio/go-zfs/zpool.go b/vendor/github.com/mistifyio/go-zfs/v3/zpool.go
index d8db945d7..2f7071305 100644
--- a/vendor/github.com/mistifyio/go-zfs/zpool.go
+++ b/vendor/github.com/mistifyio/go-zfs/v3/zpool.go
@@ -1,8 +1,9 @@
package zfs
-// ZFS zpool states, which can indicate if a pool is online, offline,
-// degraded, etc. More information regarding zpool states can be found here:
-// https://docs.oracle.com/cd/E19253-01/819-5461/gamno/index.html.
+// ZFS zpool states, which can indicate if a pool is online, offline, degraded, etc.
+//
+// More information regarding zpool states can be found in the ZFS manual:
+// https://openzfs.github.io/openzfs-docs/man/7/zpoolconcepts.7.html#Device_Failure_and_Recovery
const (
ZpoolOnline = "ONLINE"
ZpoolDegraded = "DEGRADED"
@@ -12,8 +13,8 @@ const (
ZpoolRemoved = "REMOVED"
)
-// Zpool is a ZFS zpool. A pool is a top-level structure in ZFS, and can
-// contain many descendent datasets.
+// Zpool is a ZFS zpool.
+// A pool is a top-level structure in ZFS, and can contain many descendent datasets.
type Zpool struct {
Name string
Health string
@@ -27,8 +28,14 @@ type Zpool struct {
DedupRatio float64
}
+// zpool is a helper function to wrap typical calls to zpool and ignores stdout.
+func zpool(arg ...string) error {
+ _, err := zpoolOutput(arg...)
+ return err
+}
+
// zpool is a helper function to wrap typical calls to zpool.
-func zpool(arg ...string) ([][]string, error) {
+func zpoolOutput(arg ...string) ([][]string, error) {
c := command{Command: "zpool"}
return c.Run(arg...)
}
@@ -37,7 +44,7 @@ func zpool(arg ...string) ([][]string, error) {
func GetZpool(name string) (*Zpool, error) {
args := zpoolArgs
args = append(args, name)
- out, err := zpool(args...)
+ out, err := zpoolOutput(args...)
if err != nil {
return nil, err
}
@@ -65,10 +72,11 @@ func (z *Zpool) Snapshots() ([]*Dataset, error) {
return Snapshots(z.Name)
}
-// CreateZpool creates a new ZFS zpool with the specified name, properties,
-// and optional arguments.
-// A full list of available ZFS properties and command-line arguments may be
-// found here: https://www.freebsd.org/cgi/man.cgi?zfs(8).
+// CreateZpool creates a new ZFS zpool with the specified name, properties, and optional arguments.
+//
+// A full list of available ZFS properties and command-line arguments may be found in the ZFS manual:
+// https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html.
+// https://openzfs.github.io/openzfs-docs/man/8/zpool-create.8.html
func CreateZpool(name string, properties map[string]string, args ...string) (*Zpool, error) {
cli := make([]string, 1, 4)
cli[0] = "create"
@@ -77,8 +85,7 @@ func CreateZpool(name string, properties map[string]string, args ...string) (*Zp
}
cli = append(cli, name)
cli = append(cli, args...)
- _, err := zpool(cli...)
- if err != nil {
+ if err := zpool(cli...); err != nil {
return nil, err
}
@@ -87,14 +94,14 @@ func CreateZpool(name string, properties map[string]string, args ...string) (*Zp
// Destroy destroys a ZFS zpool by name.
func (z *Zpool) Destroy() error {
- _, err := zpool("destroy", z.Name)
+ err := zpool("destroy", z.Name)
return err
}
// ListZpools list all ZFS zpools accessible on the current system.
func ListZpools() ([]*Zpool, error) {
args := []string{"list", "-Ho", "name"}
- out, err := zpool(args...)
+ out, err := zpoolOutput(args...)
if err != nil {
return nil, err
}
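
On the zpool side the exported API is unchanged even though the internal helper was split into `zpool`/`zpoolOutput`. A usage sketch of `CreateZpool` and `ListZpools` as declared above; the pool name, device path, and `ashift` property are placeholders, and root privileges are assumed:

```go
package main

import (
	"fmt"
	"log"

	zfs "github.com/mistifyio/go-zfs/v3"
)

func main() {
	// Arguments after the properties map are passed through to `zpool create`,
	// here the vdev specification.
	pool, err := zfs.CreateZpool("testpool", map[string]string{"ashift": "12"}, "/dev/sdb")
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Destroy() //nolint:errcheck // best-effort cleanup in a sketch

	pools, err := zfs.ListZpools()
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pools {
		fmt.Println(p.Name, p.Health)
	}
}
```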
diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go
index 21c268550..9828192c4 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go
@@ -13,6 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// Package cryptoutils TODO: add meaningful description
package cryptoutils
import (
diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go
index 31011f34c..89dd05e01 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go
@@ -18,7 +18,7 @@ package cryptoutils
import (
"errors"
"fmt"
- "io/ioutil"
+ "io"
"os"
"golang.org/x/term"
@@ -50,7 +50,7 @@ func readPasswordFn() func() ([]byte, error) {
}
// Handle piped in passwords.
return func() ([]byte, error) {
- return ioutil.ReadAll(os.Stdin)
+ return io.ReadAll(os.Stdin)
}
}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go
index d97bf36bf..b1a0dad05 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go
@@ -31,7 +31,11 @@ import (
const (
// PrivateKeyPEMType is the string "PRIVATE KEY" to be used during PEM encoding and decoding
- PrivateKeyPEMType PEMType = "PRIVATE KEY"
+ PrivateKeyPEMType PEMType = "PRIVATE KEY"
+ // ECPrivateKeyPEMType is the string "EC PRIVATE KEY" used to parse SEC 1 EC private keys
+ ECPrivateKeyPEMType PEMType = "EC PRIVATE KEY"
+ // PKCS1PrivateKeyPEMType is the string "RSA PRIVATE KEY" used to parse PKCS#1-encoded private keys
+ PKCS1PrivateKeyPEMType PEMType = "RSA PRIVATE KEY"
encryptedCosignPrivateKeyPEMType PEMType = "ENCRYPTED COSIGN PRIVATE KEY"
// EncryptedSigstorePrivateKeyPEMType is the string "ENCRYPTED SIGSTORE PRIVATE KEY" to be used during PEM encoding and decoding
EncryptedSigstorePrivateKeyPEMType PEMType = "ENCRYPTED SIGSTORE PRIVATE KEY"
@@ -106,6 +110,10 @@ func UnmarshalPEMToPrivateKey(pemBytes []byte, pf PassFunc) (crypto.PrivateKey,
switch derBlock.Type {
case string(PrivateKeyPEMType):
return x509.ParsePKCS8PrivateKey(derBlock.Bytes)
+ case string(PKCS1PrivateKeyPEMType):
+ return x509.ParsePKCS1PrivateKey(derBlock.Bytes)
+ case string(ECPrivateKeyPEMType):
+ return x509.ParseECPrivateKey(derBlock.Bytes)
case string(EncryptedSigstorePrivateKeyPEMType), string(encryptedCosignPrivateKeyPEMType):
derBytes := derBlock.Bytes
if pf != nil {
@@ -123,7 +131,7 @@ func UnmarshalPEMToPrivateKey(pemBytes []byte, pf PassFunc) (crypto.PrivateKey,
return x509.ParsePKCS8PrivateKey(derBytes)
}
- return nil, fmt.Errorf("unknown PEM file type: %v", derBlock.Type)
+ return nil, fmt.Errorf("unknown private key PEM file type: %v", derBlock.Type)
}
// MarshalPrivateKeyToDER converts a crypto.PrivateKey into a PKCS8 ASN.1 DER byte slice
@@ -134,7 +142,7 @@ func MarshalPrivateKeyToDER(priv crypto.PrivateKey) ([]byte, error) {
return x509.MarshalPKCS8PrivateKey(priv)
}
-// MarshalPrivateKeyToPEM converts a crypto.PrivateKey into a PEM-encoded byte slice
+// MarshalPrivateKeyToPEM converts a crypto.PrivateKey into a PKCS#8 PEM-encoded byte slice
func MarshalPrivateKeyToPEM(priv crypto.PrivateKey) ([]byte, error) {
derBytes, err := MarshalPrivateKeyToDER(priv)
if err != nil {
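
The new cases in `UnmarshalPEMToPrivateKey` dispatch `RSA PRIVATE KEY` blocks to `x509.ParsePKCS1PrivateKey` and `EC PRIVATE KEY` blocks to `x509.ParseECPrivateKey`. A standard-library-only sketch that produces and parses one of those block types; it does not call the sigstore helpers themselves:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// SEC 1 encoding yields the "EC PRIVATE KEY" block type handled by the new case.
	der, err := x509.MarshalECPrivateKey(key)
	if err != nil {
		log.Fatal(err)
	}
	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: der})

	block, _ := pem.Decode(pemBytes)
	parsed, err := x509.ParseECPrivateKey(block.Bytes) // the call the new case dispatches to
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed.Equal(key)) // true
}
```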
diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go
index e9f48decb..d2b94d4d9 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go
@@ -37,6 +37,8 @@ import (
const (
// PublicKeyPEMType is the string "PUBLIC KEY" to be used during PEM encoding and decoding
PublicKeyPEMType PEMType = "PUBLIC KEY"
+ // PKCS1PublicKeyPEMType is the string "RSA PUBLIC KEY" used to parse PKCS#1-encoded public keys
+ PKCS1PublicKeyPEMType PEMType = "RSA PUBLIC KEY"
)
// subjectPublicKeyInfo is used to construct a subject key ID.
@@ -55,6 +57,8 @@ func UnmarshalPEMToPublicKey(pemBytes []byte) (crypto.PublicKey, error) {
switch derBytes.Type {
case string(PublicKeyPEMType):
return x509.ParsePKIXPublicKey(derBytes.Bytes)
+ case string(PKCS1PublicKeyPEMType):
+ return x509.ParsePKCS1PublicKey(derBytes.Bytes)
default:
return nil, fmt.Errorf("unknown Public key PEM file type: %v. Are you passing the correct public key?",
derBytes.Type)
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go
index 903e6261b..be39c3f76 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/context.go
@@ -13,6 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// Package options TODO: add meaningful description
package options
import (
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go
index c58368433..422e5cd99 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go
@@ -13,6 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// Package payload TODO: add meaningful description
package payload
import (
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go
index 6dad67d08..3bd3823cb 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go
@@ -22,7 +22,7 @@ import (
"crypto/rsa"
"errors"
"io"
- "io/ioutil"
+ "os"
"path/filepath"
// these ensure we have the implementations loaded
@@ -77,7 +77,7 @@ func LoadSigner(privateKey crypto.PrivateKey, hashFunc crypto.Hash) (Signer, err
// RSAPSSSigner is desired instead, use the LoadRSAPSSSigner() and
// cryptoutils.UnmarshalPEMToPrivateKey() methods directly.
func LoadSignerFromPEMFile(path string, hashFunc crypto.Hash, pf cryptoutils.PassFunc) (Signer, error) {
- fileBytes, err := ioutil.ReadFile(filepath.Clean(path))
+ fileBytes, err := os.ReadFile(filepath.Clean(path))
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go
index 9592654ed..90667f2a8 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go
@@ -21,7 +21,7 @@ import (
"crypto/ed25519"
"crypto/rsa"
"errors"
- "io/ioutil"
+ "os"
"path/filepath"
"github.com/sigstore/sigstore/pkg/cryptoutils"
@@ -57,7 +57,7 @@ func LoadSignerVerifier(privateKey crypto.PrivateKey, hashFunc crypto.Hash) (Sig
// RSAPSSSignerVerifier is desired instead, use the LoadRSAPSSSignerVerifier() and
// cryptoutils.UnmarshalPEMToPrivateKey() methods directly.
func LoadSignerVerifierFromPEMFile(path string, hashFunc crypto.Hash, pf cryptoutils.PassFunc) (SignerVerifier, error) {
- fileBytes, err := ioutil.ReadFile(filepath.Clean(path))
+ fileBytes, err := os.ReadFile(filepath.Clean(path))
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go b/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go
index ea8660efc..9ca604929 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go
@@ -22,7 +22,7 @@ import (
"crypto/rsa"
"errors"
"io"
- "io/ioutil"
+ "os"
"path/filepath"
"github.com/sigstore/sigstore/pkg/cryptoutils"
@@ -86,7 +86,7 @@ func LoadUnsafeVerifier(publicKey crypto.PublicKey) (Verifier, error) {
// If the publickey is an RSA key, a RSAPKCS1v15Verifier will be returned. If a
// RSAPSSVerifier is desired instead, use the LoadRSAPSSVerifier() and cryptoutils.UnmarshalPEMToPublicKey() methods directly.
func LoadVerifierFromPEMFile(path string, hashFunc crypto.Hash) (Verifier, error) {
- fileBytes, err := ioutil.ReadFile(filepath.Clean(path))
+ fileBytes, err := os.ReadFile(filepath.Clean(path))
if err != nil {
return nil, err
}
diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go
index a2ecf5c32..93eb5ae6d 100644
--- a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go
+++ b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go
@@ -12,7 +12,7 @@ import (
"errors"
"math/bits"
- "golang.org/x/crypto/internal/subtle"
+ "golang.org/x/crypto/internal/alias"
)
const (
@@ -189,7 +189,7 @@ func (s *Cipher) XORKeyStream(dst, src []byte) {
panic("chacha20: output smaller than input")
}
dst = dst[:len(src)]
- if subtle.InexactOverlap(dst, src) {
+ if alias.InexactOverlap(dst, src) {
panic("chacha20: invalid buffer overlap")
}
diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go b/vendor/golang.org/x/crypto/internal/alias/alias.go
index 4fad24f8d..69c17f822 100644
--- a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go
+++ b/vendor/golang.org/x/crypto/internal/alias/alias.go
@@ -5,9 +5,8 @@
//go:build !purego
// +build !purego
-// Package subtle implements functions that are often useful in cryptographic
-// code but require careful thought to use correctly.
-package subtle // import "golang.org/x/crypto/internal/subtle"
+// Package alias implements memory aliasing tests.
+package alias
import "unsafe"
diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go b/vendor/golang.org/x/crypto/internal/alias/alias_purego.go
index 80ccbed2c..4775b0a43 100644
--- a/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go
+++ b/vendor/golang.org/x/crypto/internal/alias/alias_purego.go
@@ -5,9 +5,8 @@
//go:build purego
// +build purego
-// Package subtle implements functions that are often useful in cryptographic
-// code but require careful thought to use correctly.
-package subtle // import "golang.org/x/crypto/internal/subtle"
+// Package alias implements memory aliasing tests.
+package alias
// This is the Google App Engine standard variant based on reflect
// because the unsafe package and cgo are disallowed.
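The rename from internal/subtle to internal/alias only relocates the two overlap helpers; their semantics are unchanged. Because the package is internal to x/crypto and cannot be imported by other modules, the sketch below restates what the checks do rather than quoting the vendored source:

package main

import (
	"fmt"
	"unsafe"
)

// anyOverlap reports whether x and y share any backing memory: the first
// element of one slice falls within the address range covered by the other.
func anyOverlap(x, y []byte) bool {
	return len(x) > 0 && len(y) > 0 &&
		uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) &&
		uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1]))
}

// inexactOverlap is the form used by XORKeyStream and Seal: exact aliasing
// (dst and src starting at the same address) is allowed for in-place use,
// while any shifted overlap is rejected because it would corrupt the output.
func inexactOverlap(x, y []byte) bool {
	if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
		return false
	}
	return anyOverlap(x, y)
}

func main() {
	buf := make([]byte, 16)
	fmt.Println(inexactOverlap(buf, buf))              // false: in-place is permitted
	fmt.Println(inexactOverlap(buf[1:], buf))          // true: shifted overlap
	fmt.Println(inexactOverlap(buf, make([]byte, 16))) // false: separate buffers
}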
diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
index a2973e626..f3c3242a0 100644
--- a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
+++ b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
@@ -35,8 +35,8 @@ This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html.
package secretbox // import "golang.org/x/crypto/nacl/secretbox"
import (
+ "golang.org/x/crypto/internal/alias"
"golang.org/x/crypto/internal/poly1305"
- "golang.org/x/crypto/internal/subtle"
"golang.org/x/crypto/salsa20/salsa"
)
@@ -88,7 +88,7 @@ func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte {
copy(poly1305Key[:], firstBlock[:])
ret, out := sliceForAppend(out, len(message)+poly1305.TagSize)
- if subtle.AnyOverlap(out, message) {
+ if alias.AnyOverlap(out, message) {
panic("nacl: invalid buffer overlap")
}
@@ -147,7 +147,7 @@ func Open(out, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) {
}
ret, out := sliceForAppend(out, len(box)-Overhead)
- if subtle.AnyOverlap(out, box) {
+ if alias.AnyOverlap(out, box) {
panic("nacl: invalid buffer overlap")
}
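The import swap does not change secretbox's behavior: Seal and Open still panic when the output slice overlaps the input other than exactly. A minimal usage sketch of the public API, independent of the vendored tree:

package main

import (
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/nacl/secretbox"
)

func main() {
	var key [32]byte
	var nonce [24]byte
	if _, err := rand.Read(key[:]); err != nil {
		log.Fatal(err)
	}
	if _, err := rand.Read(nonce[:]); err != nil {
		log.Fatal(err)
	}

	// Seal appends the encrypted, authenticated message to its first argument.
	// Passing an output slice that partially overlaps the plaintext would trip
	// the alias.AnyOverlap panic shown above.
	box := secretbox.Seal(nil, []byte("hello"), &nonce, &key)

	plaintext, ok := secretbox.Open(nil, box, &nonce, &key)
	if !ok {
		log.Fatal("decryption failed")
	}
	fmt.Println(string(plaintext))
}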
diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go
index 3c4d18a15..eb6bc7179 100644
--- a/vendor/golang.org/x/crypto/ssh/agent/client.go
+++ b/vendor/golang.org/x/crypto/ssh/agent/client.go
@@ -226,7 +226,9 @@ var ErrExtensionUnsupported = errors.New("agent: extension unsupported")
type extensionAgentMsg struct {
ExtensionType string `sshtype:"27"`
- Contents []byte
+ // NOTE: this matches OpenSSH's PROTOCOL.agent, not the IETF draft [PROTOCOL.agent],
+ // so that it matches what OpenSSH actually implements in the wild.
+ Contents []byte `ssh:"rest"`
}
// Key represents a protocol 2 public key as defined in
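The added ssh:"rest" tag makes Contents absorb the remainder of the wire message, matching how OpenSSH frames extension requests. Callers reach this through the ExtendedAgent interface; a sketch against a local agent socket (the extension name is illustrative, and ErrExtensionUnsupported is the expected reply when the agent does not implement it):

package main

import (
	"errors"
	"fmt"
	"log"
	"net"
	"os"

	"golang.org/x/crypto/ssh/agent"
)

func main() {
	conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := agent.NewClient(conn) // returns an ExtendedAgent

	// "query" is an illustrative extension name; agents answer extensions they
	// do not implement with ErrExtensionUnsupported.
	resp, err := client.Extension("query", nil)
	if errors.Is(err, agent.ErrExtensionUnsupported) {
		fmt.Println("agent does not support this extension")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("extension reply: %x\n", resp)
}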
diff --git a/vendor/golang.org/x/net/html/render.go b/vendor/golang.org/x/net/html/render.go
index b46d81ca6..497e13204 100644
--- a/vendor/golang.org/x/net/html/render.go
+++ b/vendor/golang.org/x/net/html/render.go
@@ -85,7 +85,7 @@ func render1(w writer, n *Node) error {
if _, err := w.WriteString("<!--"); err != nil {
return err
}
- if _, err := w.WriteString(n.Data); err != nil {
+ if err := escape(w, n.Data); err != nil {
return err
}
if _, err := w.WriteString("-->"); err != nil {
@@ -96,7 +96,7 @@ func render1(w writer, n *Node) error {
if _, err := w.WriteString("<!DOCTYPE "); err != nil {
return err
}
- if _, err := w.WriteString(n.Data); err != nil {
+ if err := escape(w, n.Data); err != nil {
return err
}
if n.Attr != nil {
diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go
index 877709f99..be3c75414 100644
--- a/vendor/golang.org/x/net/html/token.go
+++ b/vendor/golang.org/x/net/html/token.go
@@ -110,9 +110,9 @@ func (t Token) String() string {
case SelfClosingTagToken:
return "<" + t.tagString() + "/>"
case CommentToken:
- return "<!--" + t.Data + "-->"
+ return "<!--" + EscapeString(t.Data) + "-->"
case DoctypeToken:
- return "<!DOCTYPE " + t.Data + ">"
+ return "<!DOCTYPE " + EscapeString(t.Data) + ">"
}
return "Invalid(" + strconv.Itoa(int(t.Type)) + ")"
}
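Both hunks close the same gap: comment and DOCTYPE data used to be emitted verbatim when re-serialized, so a crafted comment body could break out of its context; it is now routed through the package's escaper. A small sketch of the user-visible effect on Token.String (the payload string is illustrative):

package main

import (
	"fmt"

	"golang.org/x/net/html"
)

func main() {
	// Before this change the data round-tripped verbatim, yielding
	// "<!----><script>alert(1)</script><!---->"; with EscapeString applied,
	// the angle brackets are emitted as entities and the comment stays closed.
	tok := html.Token{
		Type: html.CommentToken,
		Data: "--><script>alert(1)</script><!--",
	}
	fmt.Println(tok.String())
}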
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index 0178647ee..184ac45fe 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -23,7 +23,7 @@ const frameHeaderLen = 9
var padZeros = make([]byte, 255) // zeros for padding
// A FrameType is a registered frame type as defined in
-// http://http2.github.io/http2-spec/#rfc.section.11.2
+// https://httpwg.org/specs/rfc7540.html#rfc.section.11.2
type FrameType uint8
const (
@@ -146,7 +146,7 @@ func typeFrameParser(t FrameType) frameParser {
// A FrameHeader is the 9 byte header of all HTTP/2 frames.
//
-// See http://http2.github.io/http2-spec/#FrameHeader
+// See https://httpwg.org/specs/rfc7540.html#FrameHeader
type FrameHeader struct {
valid bool // caller can access []byte fields in the Frame
@@ -575,7 +575,7 @@ func (fr *Framer) checkFrameOrder(f Frame) error {
// A DataFrame conveys arbitrary, variable-length sequences of octets
// associated with a stream.
-// See http://http2.github.io/http2-spec/#rfc.section.6.1
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.1
type DataFrame struct {
FrameHeader
data []byte
@@ -698,7 +698,7 @@ func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []by
// endpoints communicate, such as preferences and constraints on peer
// behavior.
//
-// See http://http2.github.io/http2-spec/#SETTINGS
+// See https://httpwg.org/specs/rfc7540.html#SETTINGS
type SettingsFrame struct {
FrameHeader
p []byte
@@ -837,7 +837,7 @@ func (f *Framer) WriteSettingsAck() error {
// A PingFrame is a mechanism for measuring a minimal round trip time
// from the sender, as well as determining whether an idle connection
// is still functional.
-// See http://http2.github.io/http2-spec/#rfc.section.6.7
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.7
type PingFrame struct {
FrameHeader
Data [8]byte
@@ -870,7 +870,7 @@ func (f *Framer) WritePing(ack bool, data [8]byte) error {
}
// A GoAwayFrame informs the remote peer to stop creating streams on this connection.
-// See http://http2.github.io/http2-spec/#rfc.section.6.8
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.8
type GoAwayFrame struct {
FrameHeader
LastStreamID uint32
@@ -934,7 +934,7 @@ func parseUnknownFrame(_ *frameCache, fh FrameHeader, countError func(string), p
}
// A WindowUpdateFrame is used to implement flow control.
-// See http://http2.github.io/http2-spec/#rfc.section.6.9
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.9
type WindowUpdateFrame struct {
FrameHeader
Increment uint32 // never read with high bit set
@@ -1123,7 +1123,7 @@ func (f *Framer) WriteHeaders(p HeadersFrameParam) error {
}
// A PriorityFrame specifies the sender-advised priority of a stream.
-// See http://http2.github.io/http2-spec/#rfc.section.6.3
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.3
type PriorityFrame struct {
FrameHeader
PriorityParam
@@ -1193,7 +1193,7 @@ func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error {
}
// A RSTStreamFrame allows for abnormal termination of a stream.
-// See http://http2.github.io/http2-spec/#rfc.section.6.4
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.4
type RSTStreamFrame struct {
FrameHeader
ErrCode ErrCode
@@ -1225,7 +1225,7 @@ func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error {
}
// A ContinuationFrame is used to continue a sequence of header block fragments.
-// See http://http2.github.io/http2-spec/#rfc.section.6.10
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.10
type ContinuationFrame struct {
FrameHeader
headerFragBuf []byte
@@ -1266,7 +1266,7 @@ func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlock
}
// A PushPromiseFrame is used to initiate a server stream.
-// See http://http2.github.io/http2-spec/#rfc.section.6.6
+// See https://httpwg.org/specs/rfc7540.html#rfc.section.6.6
type PushPromiseFrame struct {
FrameHeader
PromiseID uint32
diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go
index 97f17831f..6886dc163 100644
--- a/vendor/golang.org/x/net/http2/hpack/encode.go
+++ b/vendor/golang.org/x/net/http2/hpack/encode.go
@@ -191,7 +191,7 @@ func appendTableSize(dst []byte, v uint32) []byte {
// bit prefix, to dst and returns the extended buffer.
//
// See
-// http://http2.github.io/http2-spec/compression.html#integer.representation
+// https://httpwg.org/specs/rfc7541.html#integer.representation
func appendVarInt(dst []byte, n byte, i uint64) []byte {
k := uint64((1 << n) - 1)
if i < k {
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go
index 85f18a2b0..ebdfbee96 100644
--- a/vendor/golang.org/x/net/http2/hpack/hpack.go
+++ b/vendor/golang.org/x/net/http2/hpack/hpack.go
@@ -59,7 +59,7 @@ func (hf HeaderField) String() string {
// Size returns the size of an entry per RFC 7541 section 4.1.
func (hf HeaderField) Size() uint32 {
- // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
+ // https://httpwg.org/specs/rfc7541.html#rfc.section.4.1
// "The size of the dynamic table is the sum of the size of
// its entries. The size of an entry is the sum of its name's
// length in octets (as defined in Section 5.2), its value's
@@ -158,7 +158,7 @@ func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
}
type dynamicTable struct {
- // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
+ // https://httpwg.org/specs/rfc7541.html#rfc.section.2.3.2
table headerFieldTable
size uint32 // in bytes
maxSize uint32 // current maxSize
@@ -307,27 +307,27 @@ func (d *Decoder) parseHeaderFieldRepr() error {
case b&128 != 0:
// Indexed representation.
// High bit set?
- // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
+ // https://httpwg.org/specs/rfc7541.html#rfc.section.6.1
return d.parseFieldIndexed()
case b&192 == 64:
// 6.2.1 Literal Header Field with Incremental Indexing
// 0b10xxxxxx: top two bits are 10
- // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
+ // https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.1
return d.parseFieldLiteral(6, indexedTrue)
case b&240 == 0:
// 6.2.2 Literal Header Field without Indexing
// 0b0000xxxx: top four bits are 0000
- // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
+ // https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.2
return d.parseFieldLiteral(4, indexedFalse)
case b&240 == 16:
// 6.2.3 Literal Header Field never Indexed
// 0b0001xxxx: top four bits are 0001
- // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
+ // https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.3
return d.parseFieldLiteral(4, indexedNever)
case b&224 == 32:
// 6.3 Dynamic Table Size Update
// Top three bits are '001'.
- // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
+ // https://httpwg.org/specs/rfc7541.html#rfc.section.6.3
return d.parseDynamicTableSizeUpdate()
}
@@ -420,7 +420,7 @@ var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
// readVarInt reads an unsigned variable length integer off the
// beginning of p. n is the parameter as described in
-// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
+// https://httpwg.org/specs/rfc7541.html#rfc.section.5.1.
//
// n must always be between 1 and 8.
//
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index 479ba4b2b..6f2df2818 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -55,14 +55,14 @@ const (
ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
// SETTINGS_MAX_FRAME_SIZE default
- // http://http2.github.io/http2-spec/#rfc.section.6.5.2
+ // https://httpwg.org/specs/rfc7540.html#rfc.section.6.5.2
initialMaxFrameSize = 16384
// NextProtoTLS is the NPN/ALPN protocol negotiated during
// HTTP/2's TLS setup.
NextProtoTLS = "h2"
- // http://http2.github.io/http2-spec/#SettingValues
+ // https://httpwg.org/specs/rfc7540.html#SettingValues
initialHeaderTableSize = 4096
initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
@@ -111,7 +111,7 @@ func (st streamState) String() string {
// Setting is a setting parameter: which setting it is, and its value.
type Setting struct {
// ID is which setting is being set.
- // See http://http2.github.io/http2-spec/#SettingValues
+ // See https://httpwg.org/specs/rfc7540.html#SettingFormat
ID SettingID
// Val is the value.
@@ -143,7 +143,7 @@ func (s Setting) Valid() error {
}
// A SettingID is an HTTP/2 setting as defined in
-// http://http2.github.io/http2-spec/#iana-settings
+// https://httpwg.org/specs/rfc7540.html#iana-settings
type SettingID uint16
const (
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 47524a61a..aa3b0864e 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -1747,6 +1747,12 @@ func (sc *serverConn) processData(f *DataFrame) error {
// Sender sending more than they'd declared?
if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
+ if sc.inflow.available() < int32(f.Length) {
+ return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
+ }
+ sc.inflow.take(int32(f.Length))
+ sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+
st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
// RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
// value of a content-length header field does not equal the sum of the
@@ -2223,6 +2229,9 @@ func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler
didPanic := true
defer func() {
rw.rws.stream.cancelCtx()
+ if req.MultipartForm != nil {
+ req.MultipartForm.RemoveAll()
+ }
if didPanic {
e := recover()
sc.writeFrameFromHandler(FrameWriteRequest{
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 4ded4dfd5..90fdc28cf 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -67,13 +67,23 @@ const (
// A Transport internally caches connections to servers. It is safe
// for concurrent use by multiple goroutines.
type Transport struct {
- // DialTLS specifies an optional dial function for creating
- // TLS connections for requests.
+ // DialTLSContext specifies an optional dial function with context for
+ // creating TLS connections for requests.
//
- // If DialTLS is nil, tls.Dial is used.
+ // If DialTLSContext and DialTLS is nil, tls.Dial is used.
//
// If the returned net.Conn has a ConnectionState method like tls.Conn,
// it will be used to set http.Response.TLS.
+ DialTLSContext func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error)
+
+ // DialTLS specifies an optional dial function for creating
+ // TLS connections for requests.
+ //
+ // If DialTLSContext and DialTLS is nil, tls.Dial is used.
+ //
+ // Deprecated: Use DialTLSContext instead, which allows the transport
+ // to cancel dials as soon as they are no longer needed.
+ // If both are set, DialTLSContext takes priority.
DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)
// TLSClientConfig specifies the TLS configuration to use with
@@ -592,7 +602,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b
if err != nil {
return nil, err
}
- tconn, err := t.dialTLS(ctx)("tcp", addr, t.newTLSConfig(host))
+ tconn, err := t.dialTLS(ctx, "tcp", addr, t.newTLSConfig(host))
if err != nil {
return nil, err
}
@@ -613,24 +623,25 @@ func (t *Transport) newTLSConfig(host string) *tls.Config {
return cfg
}
-func (t *Transport) dialTLS(ctx context.Context) func(string, string, *tls.Config) (net.Conn, error) {
- if t.DialTLS != nil {
- return t.DialTLS
+func (t *Transport) dialTLS(ctx context.Context, network, addr string, tlsCfg *tls.Config) (net.Conn, error) {
+ if t.DialTLSContext != nil {
+ return t.DialTLSContext(ctx, network, addr, tlsCfg)
+ } else if t.DialTLS != nil {
+ return t.DialTLS(network, addr, tlsCfg)
}
- return func(network, addr string, cfg *tls.Config) (net.Conn, error) {
- tlsCn, err := t.dialTLSWithContext(ctx, network, addr, cfg)
- if err != nil {
- return nil, err
- }
- state := tlsCn.ConnectionState()
- if p := state.NegotiatedProtocol; p != NextProtoTLS {
- return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS)
- }
- if !state.NegotiatedProtocolIsMutual {
- return nil, errors.New("http2: could not negotiate protocol mutually")
- }
- return tlsCn, nil
+
+ tlsCn, err := t.dialTLSWithContext(ctx, network, addr, tlsCfg)
+ if err != nil {
+ return nil, err
+ }
+ state := tlsCn.ConnectionState()
+ if p := state.NegotiatedProtocol; p != NextProtoTLS {
+ return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS)
+ }
+ if !state.NegotiatedProtocolIsMutual {
+ return nil, errors.New("http2: could not negotiate protocol mutually")
}
+ return tlsCn, nil
}
// disableKeepAlives reports whether connections should be closed as
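With this change the transport consults the new context-aware hook first and keeps DialTLS only for backward compatibility. A configuration sketch showing DialTLSContext in use (the timeout and URL are illustrative):

package main

import (
	"context"
	"crypto/tls"
	"log"
	"net"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	t := &http2.Transport{
		// DialTLSContext is consulted before DialTLS; the context lets the
		// transport abandon dials that are no longer needed, e.g. when the
		// originating request is canceled.
		DialTLSContext: func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error) {
			d := &tls.Dialer{
				NetDialer: &net.Dialer{Timeout: 10 * time.Second},
				Config:    cfg,
			}
			return d.DialContext(ctx, network, addr)
		},
	}
	client := &http.Client{Transport: t}

	resp, err := client.Get("https://example.org/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}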
diff --git a/vendor/modules.txt b/vendor/modules.txt
index f06b92015..17e889387 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -97,7 +97,7 @@ github.com/containernetworking/cni/pkg/version
# github.com/containernetworking/plugins v1.1.1
## explicit; go 1.17
github.com/containernetworking/plugins/pkg/ns
-# github.com/containers/buildah v1.27.1-0.20220907121344-97a52b13bb27
+# github.com/containers/buildah v1.27.1-0.20220921131114-d3064796af36
## explicit; go 1.17
github.com/containers/buildah
github.com/containers/buildah/bind
@@ -120,7 +120,7 @@ github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/sshagent
github.com/containers/buildah/pkg/util
github.com/containers/buildah/util
-# github.com/containers/common v0.49.2-0.20220909190843-e5685792b5d7
+# github.com/containers/common v0.49.2-0.20220920205255-8062f81c5497
## explicit; go 1.17
github.com/containers/common/libimage
github.com/containers/common/libimage/define
@@ -174,7 +174,7 @@ github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible
## explicit
github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.22.1-0.20220907162003-651744379993
+# github.com/containers/image/v5 v5.22.1-0.20220919112403-fe51f7ffca50
## explicit; go 1.17
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@@ -266,7 +266,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.42.1-0.20220911223137-e11b246de159
+# github.com/containers/storage v1.42.1-0.20220919112236-8a581aac3bdf
## explicit; go 1.16
github.com/containers/storage
github.com/containers/storage/drivers
@@ -438,7 +438,7 @@ github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any
github.com/golang/protobuf/ptypes/duration
github.com/golang/protobuf/ptypes/timestamp
-# github.com/google/go-cmp v0.5.8
+# github.com/google/go-cmp v0.5.9
## explicit; go 1.13
github.com/google/go-cmp/cmp
github.com/google/go-cmp/cmp/internal/diff
@@ -476,8 +476,6 @@ github.com/hashicorp/errwrap
# github.com/hashicorp/go-multierror v1.1.1
## explicit; go 1.13
github.com/hashicorp/go-multierror
-# github.com/honeycombio/libhoney-go v1.15.8
-## explicit; go 1.14
# github.com/imdario/mergo v0.3.13
## explicit; go 1.13
github.com/imdario/mergo
@@ -490,8 +488,8 @@ github.com/jinzhu/copier
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
-# github.com/klauspost/compress v1.15.9
-## explicit; go 1.16
+# github.com/klauspost/compress v1.15.10
+## explicit; go 1.17
github.com/klauspost/compress
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
@@ -534,9 +532,9 @@ github.com/mattn/go-shellwords
# github.com/miekg/pkcs11 v1.1.1
## explicit; go 1.12
github.com/miekg/pkcs11
-# github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
-## explicit
-github.com/mistifyio/go-zfs
+# github.com/mistifyio/go-zfs/v3 v3.0.0
+## explicit; go 1.14
+github.com/mistifyio/go-zfs/v3
# github.com/moby/sys/mount v0.3.3
## explicit; go 1.16
github.com/moby/sys/mount
@@ -692,7 +690,7 @@ github.com/rootless-containers/rootlesskit/pkg/port/portutil
# github.com/seccomp/libseccomp-golang v0.10.0
## explicit; go 1.14
github.com/seccomp/libseccomp-golang
-# github.com/sigstore/sigstore v1.4.0
+# github.com/sigstore/sigstore v1.4.1
## explicit; go 1.17
github.com/sigstore/sigstore/pkg/cryptoutils
github.com/sigstore/sigstore/pkg/signature
@@ -724,7 +722,7 @@ github.com/syndtr/gocapability/capability
# github.com/tchap/go-patricia v2.3.0+incompatible
## explicit
github.com/tchap/go-patricia/patricia
-# github.com/theupdateframework/go-tuf v0.4.0
+# github.com/theupdateframework/go-tuf v0.5.0
## explicit; go 1.18
github.com/theupdateframework/go-tuf/encrypted
# github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399
@@ -762,6 +760,8 @@ github.com/vishvananda/netlink/nl
# github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f
## explicit; go 1.12
github.com/vishvananda/netns
+# github.com/vmihailenco/msgpack/v5 v5.3.5
+## explicit; go 1.11
# github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb
## explicit
github.com/xeipuuv/gojsonpointer
@@ -784,7 +784,7 @@ go.opencensus.io/internal
go.opencensus.io/trace
go.opencensus.io/trace/internal
go.opencensus.io/trace/tracestate
-# golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa
+# golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90
## explicit; go 1.17
golang.org/x/crypto/blowfish
golang.org/x/crypto/cast5
@@ -792,8 +792,8 @@ golang.org/x/crypto/chacha20
golang.org/x/crypto/curve25519
golang.org/x/crypto/curve25519/internal/field
golang.org/x/crypto/ed25519
+golang.org/x/crypto/internal/alias
golang.org/x/crypto/internal/poly1305
-golang.org/x/crypto/internal/subtle
golang.org/x/crypto/nacl/secretbox
golang.org/x/crypto/ocsp
golang.org/x/crypto/openpgp
@@ -810,7 +810,7 @@ golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/knownhosts
-# golang.org/x/net v0.0.0-20220722155237-a158d28d115b
+# golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b
## explicit; go 1.17
golang.org/x/net/context
golang.org/x/net/html